Diffstat (limited to 'drivers/gpu')
1007 files changed, 27315 insertions, 16799 deletions
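Editor's note: a large share of the amdgpu hunks below follow one pattern — the amd_ip_funcs callbacks stop taking an opaque void *handle (which every implementation cast back to struct amdgpu_device) and instead take the struct amdgpu_ip_block that owns them, which now carries an adev back-pointer. The following is a minimal, self-contained C sketch of that shape; the stub types, demo_hw_init() and main() are invented for illustration and are not part of the patch.

#include <stdio.h>

struct amdgpu_device { const char *name; };

struct amdgpu_ip_block;                 /* forward declaration */

struct amd_ip_funcs {
    /* was: int (*hw_init)(void *handle); handle == struct amdgpu_device * */
    int (*hw_init)(struct amdgpu_ip_block *ip_block);
};

struct amdgpu_ip_block {
    const struct amd_ip_funcs *funcs;
    struct amdgpu_device *adev;         /* back-pointer added by this series */
};

static int demo_hw_init(struct amdgpu_ip_block *ip_block)
{
    /* no cast needed any more; the block knows its device */
    printf("hw_init on %s\n", ip_block->adev->name);
    return 0;
}

int main(void)
{
    struct amdgpu_device adev = { .name = "demo-gpu" };
    const struct amd_ip_funcs funcs = { .hw_init = demo_hw_init };
    struct amdgpu_ip_block blk = { .funcs = &funcs, .adev = &adev };

    return blk.funcs->hw_init(&blk);
}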
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1df4e627e3d3..5504721007cc 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,9 +9,6 @@ menuconfig DRM
 	tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
 	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
 	select DRM_PANEL_ORIENTATION_QUIRKS
-	select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
-	select FB_CORE if DRM_FBDEV_EMULATION
-	select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
 	select HDMI
 	select I2C
 	select DMA_SHARED_BUFFER
@@ -152,6 +149,7 @@ config DRM_PANIC_SCREEN
 config DRM_PANIC_SCREEN_QR_CODE
 	bool "Add a panic screen with a QR code"
 	depends on DRM_PANIC && RUST
+	select ZLIB_DEFLATE
 	help
 	  This option adds a QR code generator, and a panic screen with a QR
 	  code. The QR code will contain the last lines of kmsg and other debug
@@ -210,10 +208,28 @@ config DRM_DEBUG_MODESET_LOCK
 
 	  If in doubt, say "N".
 
-config DRM_CLIENT_SELECTION
+config DRM_CLIENT
 	bool
 	depends on DRM
-	select DRM_CLIENT_SETUP if DRM_FBDEV_EMULATION
+	help
+	  Enables support for DRM clients. DRM drivers that need
+	  struct drm_client_dev and its interfaces should select this
+	  option. Drivers that support the default clients should
+	  select DRM_CLIENT_SELECTION instead.
+
+config DRM_CLIENT_LIB
+	tristate
+	depends on DRM
+	select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+	select FB_CORE if DRM_FBDEV_EMULATION
+	help
+	  This option enables the DRM client library and selects all
+	  modules and components according to the enabled clients.
+
+config DRM_CLIENT_SELECTION
+	tristate
+	depends on DRM
+	select DRM_CLIENT_LIB if DRM_FBDEV_EMULATION
 	help
 	  Drivers that support in-kernel DRM clients have to select this
 	  option.
@@ -221,10 +237,18 @@ config DRM_CLIENT_SELECTION
 config DRM_CLIENT_SETUP
 	bool
 	depends on DRM_CLIENT_SELECTION
+	help
+	  Enables the DRM client selection. DRM drivers that support the
+	  default clients should select DRM_CLIENT_SELECTION instead.
+
+menu "Supported DRM clients"
+	depends on DRM_CLIENT_SELECTION
 
 config DRM_FBDEV_EMULATION
 	bool "Enable legacy fbdev support for your modesetting driver"
-	depends on DRM
+	depends on DRM_CLIENT_SELECTION
+	select DRM_CLIENT
+	select DRM_CLIENT_SETUP
 	select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
 	default FB
 	help
@@ -263,6 +287,8 @@ config DRM_FBDEV_LEAK_PHYS_SMEM
 	  If in doubt, say "N" or spread the word to your closed source
 	  library vendor.
 
+endmenu
+
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
 	depends on DRM
@@ -332,19 +358,21 @@ config DRM_TTM_HELPER
 	tristate
 	depends on DRM
 	select DRM_TTM
+	select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
 	help
 	  Helpers for ttm-based gem objects
 
 config DRM_GEM_DMA_HELPER
 	tristate
 	depends on DRM
-	select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
+	select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
 	help
 	  Choose this if you need the GEM DMA helper functions
 
 config DRM_GEM_SHMEM_HELPER
 	tristate
 	depends on DRM && MMU
+	select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
 	help
 	  Choose this if you need the GEM shmem helper functions
 
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 3894f43f6d47..463afad1b5ca 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -34,15 +34,12 @@ endif
 subdir-ccflags-$(CONFIG_DRM_WERROR) += -Werror
 
 drm-y := \
-	drm_aperture.o \
 	drm_atomic.o \
 	drm_atomic_uapi.o \
 	drm_auth.o \
 	drm_blend.o \
 	drm_bridge.o \
 	drm_cache.o \
-	drm_client.o \
-	drm_client_modeset.o \
 	drm_color_mgmt.o \
 	drm_connector.o \
 	drm_crtc.o \
@@ -76,6 +73,10 @@ drm-y := \
 	drm_vblank_work.o \
 	drm_vma_manager.o \
 	drm_writeback.o
+drm-$(CONFIG_DRM_CLIENT) += \
+	drm_client.o \
+	drm_client_event.o \
+	drm_client_modeset.o
 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
@@ -143,15 +144,19 @@ drm_kms_helper-y := \
 	drm_probe_helper.o \
 	drm_self_refresh_helper.o \
 	drm_simple_kms_helper.o
-drm_kms_helper-$(CONFIG_DRM_CLIENT_SETUP) += \
-	drm_client_setup.o
 drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
-drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += \
-	drm_fbdev_client.o \
-	drm_fb_helper.o
+drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
 
 #
+# DRM clients
+#
+
+drm_client_lib-y := drm_client_setup.o
+drm_client_lib-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_client.o
+obj-$(CONFIG_DRM_CLIENT_LIB) += drm_client_lib.o
+
+#
 # Drivers and the rest
 #
 
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 680a94c361ba..41fa3377d9cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -5,6 +5,7 @@ config DRM_AMDGPU
 	depends on DRM && PCI && MMU
 	depends on !UML
 	select FW_LOADER
+	select DRM_CLIENT
 	select DRM_CLIENT_SELECTION
 	select DRM_DISPLAY_DP_HELPER
 	select DRM_DISPLAY_DSC_HELPER
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index b0f95a7649bf..f44de9d4b6a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -85,16 +85,9 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
 			      AMD_IP_BLOCK_TYPE_SDMA))
 			continue;
 
-		r = adev->ip_blocks[i].version->funcs->suspend(adev);
-
-		if (r) {
-			dev_err(adev->dev,
-				"suspend of IP block <%s> failed %d\n",
-				adev->ip_blocks[i].version->funcs->name, r);
+		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+		if (r)
 			return r;
-		}
-
-		adev->ip_blocks[i].status.hw = false;
 	}
 
 	return 0;
@@ -246,7 +239,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
 		dev_err(adev->dev, "Failed to get BIF handle\n");
 		return -EINVAL;
 	}
-	r = cmn_block->version->funcs->resume(adev);
+	r = amdgpu_ip_block_resume(cmn_block);
 	if (r)
 		return r;
 
@@ -282,15 +275,10 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
 		      adev->ip_blocks[i].version->type ==
 			      AMD_IP_BLOCK_TYPE_SDMA))
 			continue;
 
-		r = adev->ip_blocks[i].version->funcs->resume(adev);
-		if (r) {
-			dev_err(adev->dev,
-				"resume of IP block <%s> failed %d\n",
-				adev->ip_blocks[i].version->funcs->name, r);
-			return r;
-		}
-		adev->ip_blocks[i].status.hw = true;
+		r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+		if (r)
+			return r;
 	}
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -304,7 +292,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
 
 		if (adev->ip_blocks[i].version->funcs->late_init) {
 			r = adev->ip_blocks[i].version->funcs->late_init(
-				(void *)adev);
+				&adev->ip_blocks[i]);
 			if (r) {
 				dev_err(adev->dev,
 					"late_init of IP block <%s> failed %d after reset\n",
@@ -342,6 +330,8 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
 	}
 
 	list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+		amdgpu_set_init_level(tmp_adev,
+				      AMDGPU_INIT_LEVEL_RESET_RECOVERY);
 		dev_info(tmp_adev->dev,
 			 "GPU reset succeeded, trying to resume\n");
 		r = aldebaran_mode2_restore_ip(tmp_adev);
@@ -387,6 +377,8 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
 							  tmp_adev);
 
 		if (!r) {
+			amdgpu_set_init_level(tmp_adev,
+					      AMDGPU_INIT_LEVEL_DEFAULT);
 			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
 
 			r = amdgpu_ib_ring_tests(tmp_adev);
@@ -417,6 +409,7 @@ static struct amdgpu_reset_handler aldebaran_mode2_handler = {
 static struct amdgpu_reset_handler
 	*aldebaran_rst_handlers[AMDGPU_RESET_MAX_HANDLERS] = {
 		&aldebaran_mode2_handler,
+		&xgmi_reset_on_init_handler,
 };
 
 int aldebaran_reset_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9b1e0ede05a4..4653a8d2823a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -118,7 +118,7 @@
 
 #define MAX_GPU_INSTANCE		64
 
-#define GFX_SLICE_PERIOD		msecs_to_jiffies(250)
+#define GFX_SLICE_PERIOD_MS		250
 
 struct amdgpu_gpu_instance {
 	struct amdgpu_device		*adev;
@@ -131,10 +131,6 @@ struct amdgpu_mgpu_info {
 	uint32_t			num_gpu;
 	uint32_t			num_dgpu;
 	uint32_t			num_apu;
-
-	/* delayed reset_func for XGMI configuration if necessary */
-	struct delayed_work		delayed_reset_work;
-	bool				pending_reset;
 };
 
 enum amdgpu_ss {
@@ -303,6 +299,12 @@ extern int amdgpu_wbrf;
 #define AMDGPU_RESET_VCE			(1 << 13)
 #define AMDGPU_RESET_VCE1			(1 << 14)
 
+/* reset mask */
+#define AMDGPU_RESET_TYPE_FULL (1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. */
+#define AMDGPU_RESET_TYPE_SOFT_RESET (1 << 1) /* IP level soft reset */
+#define AMDGPU_RESET_TYPE_PER_QUEUE (1 << 2) /* per queue */
+#define AMDGPU_RESET_TYPE_PER_PIPE (1 << 3) /* per pipe */
+
 /* max cursor sizes (in pixels) */
 #define CIK_CURSOR_WIDTH 128
 #define CIK_CURSOR_HEIGHT 128
@@ -365,8 +367,11 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
 					    u64 *flags);
 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
 				   enum amd_ip_block_type block_type);
-bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
+bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
 			      enum amd_ip_block_type block_type);
+int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);
+
+int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);
 
 #define AMDGPU_MAX_IP_NUM 16
 
@@ -389,6 +394,7 @@ struct amdgpu_ip_block_version {
 struct amdgpu_ip_block {
 	struct amdgpu_ip_block_status status;
 	const struct amdgpu_ip_block_version *version;
+	struct amdgpu_device *adev;
 };
 
 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
@@ -563,6 +569,7 @@ enum amd_reset_method {
 	AMD_RESET_METHOD_MODE2,
 	AMD_RESET_METHOD_BACO,
 	AMD_RESET_METHOD_PCI,
+	AMD_RESET_METHOD_ON_INIT,
 };
 
 struct amdgpu_video_codec_info {
@@ -821,6 +828,25 @@ struct amdgpu_mqd {
 			struct amdgpu_mqd_prop *p);
 };
 
+/*
+ * Custom Init levels could be defined for different situations where a full
+ * initialization of all hardware blocks are not expected. Sample cases are
+ * custom init sequences after resume after S0i3/S3, reset on initialization,
+ * partial reset of blocks etc. Presently, this defines only two levels. Levels
+ * are described in corresponding struct definitions - amdgpu_init_default,
+ * amdgpu_init_minimal_xgmi.
+ */
+enum amdgpu_init_lvl_id {
+	AMDGPU_INIT_LEVEL_DEFAULT,
+	AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
+	AMDGPU_INIT_LEVEL_RESET_RECOVERY,
+};
+
+struct amdgpu_init_level {
+	enum amdgpu_init_lvl_id level;
+	uint32_t hwini_ip_block_mask;
+};
+
 #define AMDGPU_RESET_MAGIC_NUM 64
 #define AMDGPU_MAX_DF_PERFMONS 4
 struct amdgpu_reset_domain;
@@ -1092,8 +1118,6 @@ struct amdgpu_device {
 	bool				in_s3;
 	bool				in_s4;
 	bool				in_s0ix;
-	/* indicate amdgpu suspension status */
-	bool				suspend_complete;
 
 	enum pp_mp1_state		mp1_state;
 	struct amdgpu_doorbell_index	doorbell_index;
@@ -1166,6 +1190,8 @@ struct amdgpu_device {
 	bool				enforce_isolation[MAX_XCP];
 	/* Added this mutex for cleaner shader isolation between GFX and compute processes */
 	struct mutex			enforce_isolation_mutex;
+
+	struct amdgpu_init_level	*init_lvl;
 };
 
 static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1261,6 +1287,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 			 struct amdgpu_reset_context *reset_context);
+int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context);
+
 int emu_soc_asic_init(struct amdgpu_device *adev);
 
 /*
@@ -1443,6 +1471,8 @@ struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
 					    struct dma_fence *gang);
 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
+ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
+ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
 
 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)
@@ -1450,23 +1480,15 @@ void amdgpu_register_atpx_handler(void);
 void amdgpu_unregister_atpx_handler(void);
 bool amdgpu_has_atpx_dgpu_power_cntl(void);
 bool amdgpu_is_atpx_hybrid(void);
-bool amdgpu_atpx_dgpu_req_power_for_displays(void);
 bool amdgpu_has_atpx(void);
 #else
 static inline void amdgpu_register_atpx_handler(void) {}
 static inline void amdgpu_unregister_atpx_handler(void) {}
 static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
 static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
-static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
 static inline bool amdgpu_has_atpx(void) { return false; }
 #endif
 
-#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
-void *amdgpu_atpx_get_dhandle(void);
-#else
-static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
-#endif
-
 /*
  * KMS
  */
@@ -1619,4 +1641,6 @@ extern const struct attribute_group amdgpu_vram_mgr_attr_group;
 extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
 extern const struct attribute_group amdgpu_flash_attr_group;
 
+void amdgpu_set_init_level(struct amdgpu_device *adev,
+			   enum amdgpu_init_lvl_id lvl);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 2ca127173135..9d6345146495 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -158,7 +158,7 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
 		return -EINVAL;
 	}
 
-	if (start + count >= max_count)
+	if (start + count > max_count)
 		return -EINVAL;
 
 	count = min_t(int, count, max_count);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index bf6c4a0d0525..ec5e0dcf8613 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -98,9 +98,9 @@ enum {
 	ACP_TILE_DSP2,
 };
 
-static int acp_sw_init(void *handle)
+static int acp_sw_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	adev->acp.parent = adev->dev;
 
@@ -112,9 +112,9 @@ static int acp_sw_init(void *handle)
 	return 0;
 }
 
-static int acp_sw_fini(void *handle)
+static int acp_sw_fini(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	if (adev->acp.cgs_device)
 		amdgpu_cgs_destroy_device(adev->acp.cgs_device);
@@ -219,10 +219,10 @@ static const struct dmi_system_id acp_quirk_table[] = {
 /**
  * acp_hw_init - start and test ACP block
  *
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  */
-static int acp_hw_init(void *handle)
+static int acp_hw_init(struct amdgpu_ip_block *ip_block)
 {
 	int r;
 	u64 acp_base;
@@ -230,13 +230,7 @@ static int acp_hw_init(void *handle)
 	u32 count = 0;
 	struct i2s_platform_data *i2s_pdata = NULL;
 
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	const struct amdgpu_ip_block *ip_block =
-		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
-
-	if (!ip_block)
-		return -EINVAL;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	r = amd_acp_hw_init(adev->acp.cgs_device,
 			    ip_block->version->major, ip_block->version->minor);
@@ -503,14 +497,14 @@ failure:
 /**
  * acp_hw_fini - stop the hardware block
  *
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  */
-static int acp_hw_fini(void *handle)
+static int acp_hw_fini(struct amdgpu_ip_block *ip_block)
 {
 	u32 val = 0;
 	u32 count = 0;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	/* return early if no ACP */
 	if (!adev->acp.acp_genpd) {
@@ -565,9 +559,9 @@ static int acp_hw_fini(struct amdgpu_ip_block *ip_block)
 	return 0;
 }
 
-static int acp_suspend(void *handle)
+static int acp_suspend(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	/* power up on suspend */
 	if (!adev->acp.acp_cell)
@@ -575,9 +569,9 @@ static int acp_suspend(void *handle)
 	return 0;
 }
 
-static int acp_resume(void *handle)
+static int acp_resume(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	/* power down again on resume */
 	if (!adev->acp.acp_cell)
@@ -585,26 +579,11 @@ static int acp_resume(void *handle)
 	return 0;
 }
 
-static int acp_early_init(void *handle)
-{
-	return 0;
-}
-
 static bool acp_is_idle(void *handle)
 {
 	return true;
 }
 
-static int acp_wait_for_idle(void *handle)
-{
-	return 0;
-}
-
-static int acp_soft_reset(void *handle)
-{
-	return 0;
-}
-
 static int acp_set_clockgating_state(void *handle,
 				     enum amd_clockgating_state state)
 {
@@ -624,8 +603,6 @@ static int acp_set_powergating_state(void *handle,
 
 static const struct amd_ip_funcs acp_ip_funcs = {
 	.name = "acp_ip",
-	.early_init = acp_early_init,
-	.late_init = NULL,
 	.sw_init = acp_sw_init,
 	.sw_fini = acp_sw_fini,
 	.hw_init = acp_hw_init,
@@ -633,12 +610,8 @@ static const struct amd_ip_funcs acp_ip_funcs = {
 	.suspend = acp_suspend,
 	.resume = acp_resume,
 	.is_idle = acp_is_idle,
-	.wait_for_idle = acp_wait_for_idle,
-	.soft_reset = acp_soft_reset,
 	.set_clockgating_state = acp_set_clockgating_state,
 	.set_powergating_state = acp_set_powergating_state,
-	.dump_ip_state = NULL,
-	.print_ip_state = NULL,
 };
 
 const struct amdgpu_ip_block_version acp_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index f85ace0384d2..b8d4e07d2043 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -147,6 +147,7 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
 					   struct acpi_buffer *params)
 {
 	acpi_status status;
+	union acpi_object *obj;
 	union acpi_object atif_arg_elements[2];
 	struct acpi_object_list atif_arg;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -169,16 +170,24 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
 
 	status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
 				      &buffer);
+	obj = (union acpi_object *)buffer.pointer;
 
-	/* Fail only if calling the method fails and ATIF is supported */
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+	/* Fail if calling the method fails */
+	if (ACPI_FAILURE(status)) {
 		DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
 				 acpi_format_exception(status));
-		kfree(buffer.pointer);
+		kfree(obj);
 		return NULL;
 	}
 
-	return buffer.pointer;
+	if (obj->type != ACPI_TYPE_BUFFER) {
+		DRM_DEBUG_DRIVER("bad object returned from ATIF: %d\n",
+				 obj->type);
+		kfree(obj);
+		return NULL;
+	}
+
+	return obj;
 }
 
 /**
@@ -791,6 +800,7 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
 		return -EIO;
 	}
 
+	kfree(info);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 4f08b153cb66..3afcd1e8aa54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -834,6 +834,9 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
 		return -EINVAL;
 
+	if (!kiq_ring->sched.ready || adev->job_hang)
+		return 0;
+
 	ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
 	if (!ring_funcs)
 		return -ENOMEM;
@@ -858,8 +861,14 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
 
 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);
 
-	if (kiq_ring->sched.ready && !adev->job_hang)
-		r = amdgpu_ring_test_helper(kiq_ring);
+	/* Submit unmap queue packet */
+	amdgpu_ring_commit(kiq_ring);
+	/*
+	 * Ring test will do a basic scratch register change check. Just run
+	 * this to ensure that unmap queues that is submitted before got
+	 * processed successfully before returning.
+	 */
+	r = amdgpu_ring_test_helper(kiq_ring);
 
 	spin_unlock(&kiq->ring_lock);
 
@@ -889,3 +898,27 @@ int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id)
 
 	return kgd2kfd_start_sched(adev->kfd.dev, node_id);
 }
+
+/* check if there are KFD queues active */
+bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id)
+{
+	if (!adev->kfd.init_complete)
+		return false;
+
+	return kgd2kfd_compute_active(adev->kfd.dev, node_id);
+}
+
+/* Config CGTT_SQ_CLK_CTRL */
+int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id,
+				    bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable)
+{
+	int r;
+
+	if (!adev->kfd.init_complete)
+		return 0;
+
+	r = psp_config_sq_perfmon(&adev->psp, xcp_id, core_override_enable,
+				  reg_override_enable, perfmon_override_enable);
+
+	return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index f9d119448442..4b80ad860639 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -266,6 +266,10 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
 			    u32 inst);
 int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id);
 int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id);
+int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id,
+		bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
+bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id);
+
 
 /* Read user wptr from a specified user address space with page fault
  * disabled. The memory must be pinned and mapped to the hardware when
@@ -428,6 +432,7 @@ int kgd2kfd_check_and_lock_kfd(void);
 void kgd2kfd_unlock_kfd(void);
 int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
 int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
+bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
 #else
 static inline int kgd2kfd_init(void)
 {
@@ -508,5 +513,10 @@ static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
 {
 	return 0;
 }
+
+static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
+{
+	return false;
+}
 #endif
 #endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 3bc0cbf45bc5..cc66ebb7bae1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -944,9 +944,7 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
  *
  * @adev: Handle of device whose registers are to be read
  * @queue_idx: Index of queue in the queue-map bit-field
- * @wave_cnt: Output parameter updated with number of waves in flight
- * @vmid: Output parameter updated with VMID of queue whose wave count
- *        is being collected
+ * @queue_cnt: Stores the wave count and doorbell offset for an active queue
  * @inst: xcc's instance number on a multi-XCC setup
  */
 static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
@@ -1133,10 +1131,6 @@ uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
 	uint32_t low, high;
 	uint64_t queue_addr = 0;
 
-	if (!adev->debug_exp_resets &&
-	    !adev->gfx.num_gfx_rings)
-		return 0;
-
 	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
 	amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ce5ca304dba9..f30548f4c3b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1439,8 +1439,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 	list_add_tail(&vm->vm_list_node,
 			&(vm->process_info->vm_list_head));
 	vm->process_info->n_vms++;
-
-	*ef = dma_fence_get(&vm->process_info->eviction_fence->base);
+	if (ef)
+		*ef = dma_fence_get(&vm->process_info->eviction_fence->base);
 	mutex_unlock(&vm->process_info->lock);
 
 	return 0;
@@ -2524,11 +2524,14 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
 		/* First eviction, stop the queues */
 		r = kgd2kfd_quiesce_mm(mni->mm,
 				       KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
-		if (r)
+
+		if (r && r != -ESRCH)
 			pr_err("Failed to quiesce KFD\n");
-		queue_delayed_work(system_freezable_wq,
-			&process_info->restore_userptr_work,
-			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
+
+		if (r != -ESRCH)
+			queue_delayed_work(system_freezable_wq,
+				&process_info->restore_userptr_work,
+				msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
 	}
 	mutex_unlock(&process_info->notifier_lock);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 0c8975ac5af9..093141ad6ed0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1145,8 +1145,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
 	return 0;
 }
 
-void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
-					     u32 eng_clock, u32 mem_clock)
+int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
+					    u32 eng_clock, u32 mem_clock)
 {
 	SET_ENGINE_CLOCK_PS_ALLOCATION args;
 	int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
@@ -1161,8 +1161,8 @@ int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
 	if (mem_clock)
 		args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
 
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
-				  sizeof(args));
+	return amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+					 (uint32_t *)&args, sizeof(args));
 }
 
 void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 0811474e8fd3..0e16432d9a72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -163,8 +163,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
 					    bool strobe_mode,
 					    struct atom_mpll_param *mpll_param);
 
-void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
-					     u32 eng_clock, u32 mem_clock);
+int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
+					    u32 eng_clock, u32 mem_clock);
 
 bool amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 375f02002579..3893e6fc2f03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -89,18 +89,6 @@ bool amdgpu_is_atpx_hybrid(void)
 	return amdgpu_atpx_priv.atpx.is_hybrid;
 }
 
-bool amdgpu_atpx_dgpu_req_power_for_displays(void)
-{
-	return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
-}
-
-#if defined(CONFIG_ACPI)
-void *amdgpu_atpx_get_dhandle(void)
-{
-	return amdgpu_atpx_priv.dhandle;
-}
-#endif
-
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1e475eb01417..d891ab779ca7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -265,7 +265,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 
 			/* Only a single BO list is allowed to simplify handling. */
 			if (p->bo_list)
-				ret = -EINVAL;
+				goto free_partial_kdata;
 
 			ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
 			if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index cbef720de779..a68338cb7b4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -402,7 +402,7 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz
 	int r;
 	uint32_t *data, x;
 
-	if (size & 0x3 || *pos & 0x3)
+	if (size > 4096 || size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
@@ -1648,7 +1648,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 
 	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
 		ent = debugfs_create_file(debugfs_regs_names[i],
-					  S_IFREG | 0444, root,
+					  S_IFREG | 0400, root,
 					  adev, debugfs_regs[i]);
 		if (!i && !IS_ERR_OR_NULL(ent))
 			i_size_write(ent->d_inode, adev->rmmio_size);
@@ -2095,16 +2095,21 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	if (amdgpu_umsch_mm & amdgpu_umsch_mm_fwlog)
 		amdgpu_debugfs_umsch_fwlog_init(adev, &adev->umsch_mm);
 
+	amdgpu_debugfs_jpeg_sched_mask_init(adev);
+	amdgpu_debugfs_gfx_sched_mask_init(adev);
+	amdgpu_debugfs_compute_sched_mask_init(adev);
+	amdgpu_debugfs_sdma_sched_mask_init(adev);
+
 	amdgpu_ras_debugfs_create_all(adev);
 	amdgpu_rap_debugfs_init(adev);
 	amdgpu_securedisplay_debugfs_init(adev);
 	amdgpu_fw_attestation_debugfs_init(adev);
 
-	debugfs_create_file("amdgpu_evict_vram", 0444, root, adev,
+	debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
 			    &amdgpu_evict_vram_fops);
-	debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev,
+	debugfs_create_file("amdgpu_evict_gtt", 0400, root, adev,
 			    &amdgpu_evict_gtt_fops);
-	debugfs_create_file("amdgpu_test_ib", 0444, root, adev,
+	debugfs_create_file("amdgpu_test_ib", 0400, root, adev,
 			    &amdgpu_debugfs_test_ib_fops);
 	debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
 			    &amdgpu_debugfs_vm_info_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 5ac59b62020c..946c48829f19 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -203,6 +203,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
 	struct amdgpu_coredump_info *coredump = data;
 	struct drm_print_iterator iter;
 	struct amdgpu_vm_fault_info *fault_info;
+	struct amdgpu_ip_block *ip_block;
 	int ver;
 
 	iter.data = buffer;
@@ -282,13 +283,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
 	/* dump the ip state for each ip */
 	drm_printf(&p, "IP Dump\n");
 	for (int i = 0; i < coredump->adev->num_ip_blocks; i++) {
-		if (coredump->adev->ip_blocks[i].version->funcs->print_ip_state) {
-			drm_printf(&p, "IP: %s\n",
-				   coredump->adev->ip_blocks[i]
-					   .version->funcs->name);
-			coredump->adev->ip_blocks[i]
-				.version->funcs->print_ip_state(
-					(void *)coredump->adev, &p);
+		ip_block = &coredump->adev->ip_blocks[i];
+		if (ip_block->version->funcs->print_ip_state) {
+			drm_printf(&p, "IP: %s\n", ip_block->version->funcs->name);
+			ip_block->version->funcs->print_ip_state(ip_block, &p);
 			drm_printf(&p, "\n");
 		}
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index fd853dc843e9..96316111300a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,8 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
+
+#include <linux/aperture.h>
 #include <linux/power_supply.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
@@ -35,10 +37,9 @@
 #include <linux/pci-p2pdma.h>
 #include <linux/apple-gmux.h>
 
-#include <drm/drm_aperture.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_client_event.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
 #include <linux/device.h>
@@ -144,6 +145,59 @@ const char *amdgpu_asic_name[] = {
 	"LAST",
 };
 
+#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMDGPU_MAX_IP_NUM, 0)
+/*
+ * Default init level where all blocks are expected to be initialized. This is
+ * the level of initialization expected by default and also after a full reset
+ * of the device.
+ */
+struct amdgpu_init_level amdgpu_init_default = {
+	.level = AMDGPU_INIT_LEVEL_DEFAULT,
+	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
+};
+
+struct amdgpu_init_level amdgpu_init_recovery = {
+	.level = AMDGPU_INIT_LEVEL_RESET_RECOVERY,
+	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
+};
+
+/*
+ * Minimal blocks needed to be initialized before a XGMI hive can be reset. This
+ * is used for cases like reset on initialization where the entire hive needs to
+ * be reset before first use.
+ */
+struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
+	.level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
+	.hwini_ip_block_mask =
+		BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |
+		BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |
+		BIT(AMD_IP_BLOCK_TYPE_PSP)
+};
+
+static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
+					     enum amd_ip_block_type block)
+{
+	return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0;
+}
+
+void amdgpu_set_init_level(struct amdgpu_device *adev,
+			   enum amdgpu_init_lvl_id lvl)
+{
+	switch (lvl) {
+	case AMDGPU_INIT_LEVEL_MINIMAL_XGMI:
+		adev->init_lvl = &amdgpu_init_minimal_xgmi;
+		break;
+	case AMDGPU_INIT_LEVEL_RESET_RECOVERY:
+		adev->init_lvl = &amdgpu_init_recovery;
+		break;
+	case AMDGPU_INIT_LEVEL_DEFAULT:
+		fallthrough;
+	default:
+		adev->init_lvl = &amdgpu_init_default;
+		break;
+	}
+}
+
 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
 
 /**
@@ -227,6 +281,42 @@ void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
 	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
 }
 
+int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block)
+{
+	int r;
+
+	if (ip_block->version->funcs->suspend) {
+		r = ip_block->version->funcs->suspend(ip_block);
+		if (r) {
+			dev_err(ip_block->adev->dev,
+				"suspend of IP block <%s> failed %d\n",
+				ip_block->version->funcs->name, r);
+			return r;
+		}
+	}
+
+	ip_block->status.hw = false;
+	return 0;
+}
+
+int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block)
+{
+	int r;
+
+	if (ip_block->version->funcs->resume) {
+		r = ip_block->version->funcs->resume(ip_block);
+		if (r) {
+			dev_err(ip_block->adev->dev,
+				"resume of IP block <%s> failed %d\n",
+				ip_block->version->funcs->name, r);
+			return r;
+		}
+	}
+
+	ip_block->status.hw = true;
+	return 0;
+}
+
 /**
  * DOC: board_info
  *
@@ -1655,7 +1745,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
 	}
 
 	/* Don't post if we need to reset whole hive on init */
-	if (adev->gmc.xgmi.pending_reset)
+	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
 		return false;
 
 	if (adev->has_hw_reset) {
@@ -2159,9 +2249,12 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
 		if (adev->ip_blocks[i].version->type == block_type) {
-			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
-			if (r)
-				return r;
+			if (adev->ip_blocks[i].version->funcs->wait_for_idle) {
+				r = adev->ip_blocks[i].version->funcs->wait_for_idle(
+					&adev->ip_blocks[i]);
+				if (r)
+					return r;
+			}
 			break;
 		}
 	}
@@ -2170,26 +2263,24 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_device_ip_is_idle - is the hardware IP idle
+ * amdgpu_device_ip_is_valid - is the hardware IP enabled
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
- * Check if the hardware IP is idle or not.
- * Returns true if it the IP is idle, false if not.
+ * Check if the hardware IP is enable or not.
+ * Returns true if it the IP is enable, false if not.
 */
-bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
-			      enum amd_ip_block_type block_type)
+bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
+			       enum amd_ip_block_type block_type)
 {
 	int i;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_blocks[i].status.valid)
-			continue;
 		if (adev->ip_blocks[i].version->type == block_type)
-			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
+			return adev->ip_blocks[i].status.valid;
 	}
-	return true;
+	return false;
 
 }
 
@@ -2271,6 +2362,8 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
 	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
 		  ip_block_version->funcs->name);
 
+	adev->ip_blocks[adev->num_ip_blocks].adev = adev;
+
 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
 
 	return 0;
@@ -2566,25 +2659,25 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	total = true;
 	for (i = 0; i < adev->num_ip_blocks; i++) {
+		ip_block = &adev->ip_blocks[i];
+
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
 			DRM_WARN("disabled ip block: %d <%s>\n",
 				  i, adev->ip_blocks[i].version->funcs->name);
 			adev->ip_blocks[i].status.valid = false;
-		} else {
-			if (adev->ip_blocks[i].version->funcs->early_init) {
-				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
-				if (r == -ENOENT) {
-					adev->ip_blocks[i].status.valid = false;
-				} else if (r) {
-					DRM_ERROR("early_init of IP block <%s> failed %d\n",
-						  adev->ip_blocks[i].version->funcs->name, r);
-					total = false;
-				} else {
-					adev->ip_blocks[i].status.valid = true;
-				}
+		} else if (ip_block->version->funcs->early_init) {
+			r = ip_block->version->funcs->early_init(ip_block);
+			if (r == -ENOENT) {
+				adev->ip_blocks[i].status.valid = false;
+			} else if (r) {
+				DRM_ERROR("early_init of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
+				total = false;
 			} else {
 				adev->ip_blocks[i].status.valid = true;
 			}
+		} else {
+			adev->ip_blocks[i].status.valid = true;
 		}
 
 		/* get the vbios after the asic_funcs are set up */
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
@@ -2633,10 +2726,13 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
 			continue;
 		if (adev->ip_blocks[i].status.hw)
 			continue;
+		if (!amdgpu_ip_member_of_hwini(
+			    adev, adev->ip_blocks[i].version->type))
+			continue;
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
-			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
 			if (r) {
 				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
 					  adev->ip_blocks[i].version->funcs->name, r);
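Editor's sketch (not part of the patch): the hunk above gates hw_init on amdgpu_ip_member_of_hwini(), i.e. on the hwini_ip_block_mask of the currently selected init level. The standalone C program below mirrors that mask check under the minimal-XGMI mask quoted earlier in amdgpu_device.c; the enum ordering, helper name member_of_hwini() and main() are illustrative assumptions only.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

enum amd_ip_block_type {
    AMD_IP_BLOCK_TYPE_COMMON,
    AMD_IP_BLOCK_TYPE_GMC,
    AMD_IP_BLOCK_TYPE_IH,
    AMD_IP_BLOCK_TYPE_SMC,
    AMD_IP_BLOCK_TYPE_PSP,
    AMD_IP_BLOCK_TYPE_GFX,      /* deliberately not in the minimal mask */
};

/* mirrors amdgpu_init_minimal_xgmi.hwini_ip_block_mask from the diff */
static const uint32_t minimal_xgmi_mask =
    BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |
    BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |
    BIT(AMD_IP_BLOCK_TYPE_PSP);

/* mirrors amdgpu_ip_member_of_hwini(): masked-out blocks skip hw_init */
static int member_of_hwini(uint32_t mask, enum amd_ip_block_type block)
{
    return (mask & (1U << block)) != 0;
}

int main(void)
{
    printf("GFX inits under minimal XGMI? %s\n",
           member_of_hwini(minimal_xgmi_mask, AMD_IP_BLOCK_TYPE_GFX)
               ? "yes" : "no");    /* prints "no" */
    return 0;
}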
@@ -2658,7 +2754,10 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
 			continue;
 		if (adev->ip_blocks[i].status.hw)
 			continue;
-		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+		if (!amdgpu_ip_member_of_hwini(
+			    adev, adev->ip_blocks[i].version->type))
+			continue;
+		r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
 		if (r) {
 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
 				  adev->ip_blocks[i].version->funcs->name, r);
@@ -2681,6 +2780,10 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
 			continue;
 
+		if (!amdgpu_ip_member_of_hwini(adev,
+					       AMD_IP_BLOCK_TYPE_PSP))
+			break;
+
 		if (!adev->ip_blocks[i].status.sw)
 			continue;
 
@@ -2689,22 +2792,18 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
 			break;
 
 		if (amdgpu_in_reset(adev) || adev->in_suspend) {
-			r = adev->ip_blocks[i].version->funcs->resume(adev);
-			if (r) {
-				DRM_ERROR("resume of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].version->funcs->name, r);
+			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+			if (r)
 				return r;
-			}
 		} else {
-			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
 			if (r) {
 				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
 					  adev->ip_blocks[i].version->funcs->name, r);
 				return r;
 			}
+			adev->ip_blocks[i].status.hw = true;
 		}
-
-		adev->ip_blocks[i].status.hw = true;
 		break;
 	}
 
@@ -2786,6 +2885,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
  */
 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 {
+	bool init_badpage;
 	int i, r;
 
 	r = amdgpu_ras_init(adev);
@@ -2795,17 +2895,23 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
-		if (r) {
-			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
-			goto init_failed;
+		if (adev->ip_blocks[i].version->funcs->sw_init) {
+			r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
+			if (r) {
+				DRM_ERROR("sw_init of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
+				goto init_failed;
+			}
 		}
 		adev->ip_blocks[i].status.sw = true;
 
+		if (!amdgpu_ip_member_of_hwini(
+			    adev, adev->ip_blocks[i].version->type))
+			continue;
+
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
 			/* need to do common hw init early so everything is set up for gmc */
-			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
 			if (r) {
 				DRM_ERROR("hw_init %d failed %d\n", i, r);
 				goto init_failed;
@@ -2822,7 +2928,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 				DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
 				goto init_failed;
 			}
-			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
 			if (r) {
 				DRM_ERROR("hw_init %d failed %d\n", i, r);
 				goto init_failed;
@@ -2895,7 +3001,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	 * Note: theoretically, this should be called before all vram allocations
 	 * to protect retired page from abusing
 	 */
-	r = amdgpu_ras_recovery_init(adev);
+	init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
+	r = amdgpu_ras_recovery_init(adev, init_badpage);
 	if (r)
 		goto init_failed;
 
@@ -2935,7 +3042,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 		amdgpu_ttm_set_buffer_funcs_status(adev, true);
 
 	/* Don't init kfd if whole hive need to be reset during init */
-	if (!adev->gmc.xgmi.pending_reset) {
+	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
 		kgd2kfd_init_zone_device(adev);
 		amdgpu_amdkfd_device_init(adev);
 	}
@@ -3135,7 +3242,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 		if (!adev->ip_blocks[i].status.hw)
 			continue;
 		if (adev->ip_blocks[i].version->funcs->late_init) {
-			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
+			r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
 			if (r) {
 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
 					  adev->ip_blocks[i].version->funcs->name, r);
@@ -3151,7 +3258,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 		return r;
 	}
 
-	if (!amdgpu_in_reset(adev))
+	if (!amdgpu_reset_in_recovery(adev))
 		amdgpu_ras_set_error_query_ready(adev, true);
 
 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
@@ -3206,6 +3313,25 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+	int r;
+
+	if (!ip_block->version->funcs->hw_fini) {
+		DRM_ERROR("hw_fini of IP block <%s> not defined\n",
+			  ip_block->version->funcs->name);
+	} else {
+		r = ip_block->version->funcs->hw_fini(ip_block);
+		/* XXX handle errors */
+		if (r) {
+			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+				  ip_block->version->funcs->name, r);
+		}
+	}
+
+	ip_block->status.hw = false;
+}
+
 /**
  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
  *
@@ -3215,7 +3341,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
  */
 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
 {
-	int i, r;
+	int i;
 
 	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
 		return;
@@ -3224,13 +3350,7 @@ static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
 		if (!adev->ip_blocks[i].status.hw)
 			continue;
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
-			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
-			/* XXX handle errors */
-			if (r) {
-				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].version->funcs->name, r);
-			}
-			adev->ip_blocks[i].status.hw = false;
+			amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
 			break;
 		}
 	}
@@ -3244,7 +3364,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
 		if (!adev->ip_blocks[i].version->funcs->early_fini)
 			continue;
 
-		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
 		if (r) {
 			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
 				  adev->ip_blocks[i].version->funcs->name, r);
@@ -3263,14 +3383,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
 		if (!adev->ip_blocks[i].status.hw)
 			continue;
 
-		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
-		/* XXX handle errors */
-		if (r) {
-			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
-		}
-
-		adev->ip_blocks[i].status.hw = false;
+		amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
 	}
 
 	if (amdgpu_sriov_vf(adev)) {
@@ -3316,12 +3429,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 			amdgpu_ib_pool_fini(adev);
 			amdgpu_seq64_fini(adev);
 		}
-
-		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
-		/* XXX handle errors */
-		if (r) {
-			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
+		if (adev->ip_blocks[i].version->funcs->sw_fini) {
+			r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
+			/* XXX handle errors */
+			if (r) {
+				DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
+			}
 		}
 		adev->ip_blocks[i].status.sw = false;
 		adev->ip_blocks[i].status.valid = false;
@@ -3331,7 +3445,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 		if (!adev->ip_blocks[i].status.late_initialized)
 			continue;
 		if (adev->ip_blocks[i].version->funcs->late_fini)
-			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+			adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
 		adev->ip_blocks[i].status.late_initialized = false;
 	}
 
@@ -3403,15 +3517,9 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
 			continue;
 
 		/* XXX handle errors */
-		r = adev->ip_blocks[i].version->funcs->suspend(adev);
-		/* XXX handle errors */
-		if (r) {
-			DRM_ERROR("suspend of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
+		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+		if (r)
 			return r;
-		}
-
-		adev->ip_blocks[i].status.hw = false;
 	}
 
 	return 0;
@@ -3449,14 +3557,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 		}
 
 		/* skip unnecessary suspend if we do not initialize them yet */
-		if (adev->gmc.xgmi.pending_reset &&
-		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
-		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
-		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
-		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
-			adev->ip_blocks[i].status.hw = false;
+		if (!amdgpu_ip_member_of_hwini(
+			    adev, adev->ip_blocks[i].version->type))
 			continue;
-		}
 
 		/* skip suspend of gfx/mes and psp for S0ix
 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
@@ -3490,13 +3593,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 			continue;
 
 		/* XXX handle errors */
-		r = adev->ip_blocks[i].version->funcs->suspend(adev);
-		/* XXX handle errors */
-		if (r) {
-			DRM_ERROR("suspend of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
-		}
+		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
 		adev->ip_blocks[i].status.hw = false;
+
 		/* handle putting the SMC in the appropriate state */
 		if (!amdgpu_sriov_vf(adev)) {
 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
@@ -3570,10 +3669,12 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
 			    !block->status.valid)
 				continue;
 
-			r = block->version->funcs->hw_init(adev);
-			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
-			if (r)
+			r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
+			if (r) {
+				dev_err(adev->dev, "RE-INIT-early: %s failed\n",
+					block->version->funcs->name);
 				return r;
+			}
 			block->status.hw = true;
 		}
 	}
@@ -3583,7 +3684,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
 
 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
 {
-	int i, r;
+	struct amdgpu_ip_block *block;
+	int i, r = 0;
 
 	static enum amd_ip_block_type ip_order[] = {
 		AMD_IP_BLOCK_TYPE_SMC,
@@ -3598,30 +3700,28 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
 	};
 
 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
-		int j;
-		struct amdgpu_ip_block *block;
-
-		for (j = 0; j < adev->num_ip_blocks; j++) {
-			block = &adev->ip_blocks[j];
+		block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]);
 
-			if (block->version->type != ip_order[i] ||
-				!block->status.valid ||
-				block->status.hw)
-				continue;
+		if (!block)
+			continue;
 
-			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
-				r = block->version->funcs->resume(adev);
-			else
-				r = block->version->funcs->hw_init(adev);
+		if (block->status.valid && !block->status.hw) {
+			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) {
+				r = amdgpu_ip_block_resume(block);
+			} else {
+				r = block->version->funcs->hw_init(block);
+			}
 
-			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
-			if (r)
-				return r;
+			if (r) {
+				dev_err(adev->dev, "RE-INIT-late: %s failed\n",
+					block->version->funcs->name);
+				break;
+			}
 			block->status.hw = true;
 		}
 	}
 
-	return 0;
+	return r;
 }
 
 /**
@@ -3648,13 +3748,9 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
-			r = adev->ip_blocks[i].version->funcs->resume(adev);
-			if (r) {
-				DRM_ERROR("resume of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].version->funcs->name, r);
+			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+			if (r)
 				return r;
-			}
-			adev->ip_blocks[i].status.hw = true;
 		}
 	}
 
@@ -3666,7 +3762,7 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
  *
  * @adev: amdgpu_device pointer
  *
- * First resume function for hardware IPs.  The list of all the hardware
+ * Second resume function for hardware IPs.  The list of all the hardware
  * IPs that make up the asic is walked and the resume callbacks are run for
  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
  * functional state after a suspend and updates the software state as
@@ -3684,15 +3780,42 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
 			continue;
-		r = adev->ip_blocks[i].version->funcs->resume(adev);
-		if (r) {
-			DRM_ERROR("resume of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
+		r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+		if (r)
 			return r;
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Third resume function for hardware IPs.  The list of all the hardware
+ * IPs that make up the asic is walked and the resume callbacks are run for
+ * all DCE.  resume puts the hardware into a functional state after a suspend
+ * and updates the software state as necessary.  This function is also used
+ * for restoring the GPU after a GPU reset.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
+			continue;
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+			if (r)
+				return r;
 		}
-		adev->ip_blocks[i].status.hw = true;
 	}
 
 	return 0;
@@ -3727,6 +3850,13 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
 	if (adev->mman.buffer_funcs_ring->sched.ready)
 		amdgpu_ttm_set_buffer_funcs_status(adev, true);
 
+	if (r)
+		return r;
+
+	amdgpu_fence_driver_hw_init(adev);
+
+	r = amdgpu_device_ip_resume_phase3(adev);
+
 	return r;
 }
 
@@ -4149,7 +4279,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	 * for throttling interrupt) = 60 seconds.
 	 */
 	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
+	ratelimit_state_init(&adev->virt.ras_telemetry_rs, 5 * HZ, 1);
+
 	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
+	ratelimit_set_flags(&adev->virt.ras_telemetry_rs, RATELIMIT_MSG_ON_RELEASE);
 
 	/* Registers mapping */
 	/* TODO: block userspace mapping of io register */
@@ -4193,13 +4326,19 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 	amdgpu_device_set_mcbp(adev);
 
+	/*
+	 * By default, use default mode where all blocks are expected to be
+	 * initialized. At present a 'swinit' of blocks is required to be
+	 * completed before the need for a different level is detected.
+	 */
+	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
 	/* early init functions */
 	r = amdgpu_device_ip_early_init(adev);
 	if (r)
 		return r;
 
 	/* Get rid of things like offb */
-	r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
+	r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
 	if (r)
 		return r;
 
@@ -4265,20 +4404,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
 		if (adev->gmc.xgmi.num_physical_nodes) {
 			dev_info(adev->dev, "Pending hive reset.\n");
-			adev->gmc.xgmi.pending_reset = true;
-			/* Only need to init necessary block for SMU to handle the reset */
-			for (i = 0; i < adev->num_ip_blocks; i++) {
-				if (!adev->ip_blocks[i].status.valid)
-					continue;
-				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
-				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
-				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
-				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
-					DRM_DEBUG("IP %s disabled for hw_init.\n",
-						adev->ip_blocks[i].version->funcs->name);
-					adev->ip_blocks[i].status.hw = true;
-				}
-			}
+			amdgpu_set_init_level(adev,
+					      AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
 		} else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
 			   !amdgpu_device_has_display_hardware(adev)) {
 			r = psp_gpu_reset(adev);
@@ -4386,7 +4513,7 @@ fence_driver_init:
 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
 	 * explicit gating rather than handling it automatically.
 	 */
-	if (!adev->gmc.xgmi.pending_reset) {
+	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
 		r = amdgpu_device_ip_late_init(adev);
 		if (r) {
 			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
@@ -4436,6 +4563,7 @@ fence_driver_init:
 	amdgpu_fru_sysfs_init(adev);
 	amdgpu_reg_state_sysfs_init(adev);
+	amdgpu_xcp_cfg_sysfs_init(adev);
 
 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
 		r = amdgpu_pmu_init(adev);
@@ -4463,9 +4591,8 @@ fence_driver_init:
 	if (px)
 		vga_switcheroo_init_domain_pm_ops(adev->dev,
 						  &adev->vga_pm_domain);
 
-	if (adev->gmc.xgmi.pending_reset)
-		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
-				   msecs_to_jiffies(AMDGPU_RESUME_MS));
+	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
+		amdgpu_xgmi_reset_on_init(adev);
 
 	amdgpu_device_check_iommu_direct_map(adev);
 
@@ -4559,6 +4686,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 	amdgpu_fru_sysfs_fini(adev);
 
 	amdgpu_reg_state_sysfs_fini(adev);
+	amdgpu_xcp_cfg_sysfs_fini(adev);
 
 	/* disable ras feature must before hw fini */
 	amdgpu_ras_pre_fini(adev);
@@ -4584,8 +4712,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 	int idx;
 	bool px;
 
-	amdgpu_fence_driver_sw_fini(adev);
 	amdgpu_device_ip_fini(adev);
+	amdgpu_fence_driver_sw_fini(adev);
 	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
 	adev->accel_working = false;
 	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
@@ -4694,7 +4822,7 @@ int amdgpu_device_prepare(struct drm_device *dev)
 			continue;
 		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
 			continue;
-		r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
 		if (r)
 			goto unprepare;
 	}
@@ -4711,13 +4839,13 @@ unprepare:
  * amdgpu_device_suspend - initiate device suspend
  *
  * @dev: drm dev pointer
- * @fbcon : notify the fbdev of suspend
+ * @notify_clients: notify in-kernel DRM clients
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
-int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
 {
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	int r = 0;
@@ -4737,8 +4865,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
 		DRM_WARN("smart shift update failed\n");
 
-	if (fbcon)
-		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
+	if (notify_clients)
+		drm_client_dev_suspend(adev_to_drm(adev), false);
 
 	cancel_delayed_work_sync(&adev->delayed_init_work);
 
@@ -4773,13 +4901,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
- * @fbcon : notify the fbdev of resume
+ * @notify_clients: notify in-kernel DRM clients
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
*/ -int amdgpu_device_resume(struct drm_device *dev, bool fbcon) +int amdgpu_device_resume(struct drm_device *dev, bool notify_clients) { struct amdgpu_device *adev = drm_to_adev(dev); int r = 0; @@ -4809,7 +4937,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); goto exit; } - amdgpu_fence_driver_hw_init(adev); if (!adev->in_s0ix) { r = amdgpu_amdkfd_resume(adev, adev->in_runpm); @@ -4835,8 +4962,8 @@ exit: /* Make sure IB tests flushed */ flush_delayed_work(&adev->delayed_init_work); - if (fbcon) - drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); + if (notify_clients) + drm_client_dev_resume(adev_to_drm(adev), false); amdgpu_ras_resume(adev); @@ -4898,7 +5025,8 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].version->funcs->check_soft_reset) adev->ip_blocks[i].status.hang = - adev->ip_blocks[i].version->funcs->check_soft_reset(adev); + adev->ip_blocks[i].version->funcs->check_soft_reset( + &adev->ip_blocks[i]); if (adev->ip_blocks[i].status.hang) { dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); asic_hang = true; @@ -4927,7 +5055,7 @@ static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].status.hang && adev->ip_blocks[i].version->funcs->pre_soft_reset) { - r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); + r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]); if (r) return r; } @@ -4989,7 +5117,7 @@ static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].status.hang && adev->ip_blocks[i].version->funcs->soft_reset) { - r = adev->ip_blocks[i].version->funcs->soft_reset(adev); + r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]); if (r) return r; } @@ -5018,7 +5146,7 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].status.hang && adev->ip_blocks[i].version->funcs->post_soft_reset) - r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); + r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]); if (r) return r; } @@ -5103,6 +5231,9 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) || amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) amdgpu_ras_resume(adev); + + amdgpu_virt_ras_telemetry_post_reset(adev); + return 0; } @@ -5309,7 +5440,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, for (i = 0; i < tmp_adev->num_ip_blocks; i++) if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state) tmp_adev->ip_blocks[i].version->funcs - ->dump_ip_state((void *)tmp_adev); + ->dump_ip_state((void *)&tmp_adev->ip_blocks[i]); dev_info(tmp_adev->dev, "Dumping IP State Completed\n"); } @@ -5325,74 +5456,33 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, return r; } -int amdgpu_do_asic_reset(struct list_head *device_list_handle, - struct amdgpu_reset_context *reset_context) +int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context) { - struct amdgpu_device *tmp_adev = NULL; - bool need_full_reset, skip_hw_reset, vram_lost = false; - int r = 0; + struct list_head *device_list_handle; + bool full_reset, vram_lost = false; + struct amdgpu_device *tmp_adev; + int r, init_level; - /* Try reset handler method first */ - tmp_adev = 
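The rename from fbcon to notify_clients reflects what the parameter now does: instead of poking the fbdev helper directly, suspend and resume notify all in-kernel DRM clients. A hedged sketch of how a driver's PM paths would use the new entry points (mydrv_* is a hypothetical driver; the header location and the meaning of the second argument are assumptions based on the call sites in this patch):

#include <drm/drm_drv.h>
#include <drm/drm_client_event.h>  /* assumed home of drm_client_dev_suspend()/resume() */

static int mydrv_pm_suspend(struct drm_device *drm)
{
        /* quiesce engines, fences and interrupts first ... */

        /* then tell fbdev emulation and any other in-kernel client to
         * stop touching the device; 'false' mirrors the amdgpu call
         * sites above */
        drm_client_dev_suspend(drm, false);
        return 0;
}

static int mydrv_pm_resume(struct drm_device *drm)
{
        /* bring the hardware back first ... */

        drm_client_dev_resume(drm, false);
        return 0;
}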
list_first_entry(device_list_handle, struct amdgpu_device, - reset_list); + device_list_handle = reset_context->reset_device_list; - reset_context->reset_device_list = device_list_handle; - r = amdgpu_reset_perform_reset(tmp_adev, reset_context); - /* If reset handler not implemented, continue; otherwise return */ - if (r == -EOPNOTSUPP) - r = 0; - else - return r; + if (!device_list_handle) + return -EINVAL; - /* Reset handler not implemented, use the default method */ - need_full_reset = - test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); - skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); + full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); - /* - * ASIC reset has to be done on all XGMI hive nodes ASAP - * to allow proper links negotiation in FW (within 1 sec) + /** + * If it's reset on init, it's default init level, otherwise keep level + * as recovery level. */ - if (!skip_hw_reset && need_full_reset) { - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - /* For XGMI run all resets in parallel to speed up the process */ - if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { - tmp_adev->gmc.xgmi.pending_reset = false; - if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) - r = -EALREADY; - } else - r = amdgpu_asic_reset(tmp_adev); - - if (r) { - dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s", - r, adev_to_drm(tmp_adev)->unique); - goto out; - } - } - - /* For XGMI wait for all resets to complete before proceed */ - if (!r) { - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { - flush_work(&tmp_adev->xgmi_reset_work); - r = tmp_adev->asic_reset_res; - if (r) - break; - } - } - } - } - - if (!r && amdgpu_ras_intr_triggered()) { - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB); - } - - amdgpu_ras_intr_cleared(); - } + if (reset_context->method == AMD_RESET_METHOD_ON_INIT) + init_level = AMDGPU_INIT_LEVEL_DEFAULT; + else + init_level = AMDGPU_INIT_LEVEL_RESET_RECOVERY; + r = 0; list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - if (need_full_reset) { + amdgpu_set_init_level(tmp_adev, init_level); + if (full_reset) { /* post card */ amdgpu_ras_set_fed(tmp_adev, false); r = amdgpu_device_asic_init(tmp_adev); @@ -5431,6 +5521,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, if (tmp_adev->mman.buffer_funcs_ring->sched.ready) amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true); + r = amdgpu_device_ip_resume_phase3(tmp_adev); + if (r) + goto out; + if (vram_lost) amdgpu_device_fill_reset_magic(tmp_adev); @@ -5448,7 +5542,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, if (r) goto out; - drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false); + drm_client_dev_resume(adev_to_drm(tmp_adev), false); /* * The GPU enters bad state once faulty pages @@ -5478,11 +5572,13 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, out: if (!r) { + /* IP init is complete now, set level as default */ + amdgpu_set_init_level(tmp_adev, + AMDGPU_INIT_LEVEL_DEFAULT); amdgpu_irq_gpu_reset_resume_helper(tmp_adev); r = amdgpu_ib_ring_tests(tmp_adev); if (r) { dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); - need_full_reset = true; r = -EAGAIN; goto end; } @@ -5493,10 +5589,85 @@ out: } end: - if (need_full_reset) + return r; +} + +int amdgpu_do_asic_reset(struct list_head 
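amdgpu_device_reinit_after_reset() keys the reinitialization depth off the reset reason: a reset-on-init pass reinitializes at the default level, while a recovery reset runs at the reduced recovery level and is only promoted back to the default level once the IB ring tests pass. Restated as a small standalone C sketch (names shortened; illustrative only):

enum init_level { LVL_DEFAULT, LVL_MINIMAL_XGMI, LVL_RESET_RECOVERY };

/* Level used while re-initializing IP blocks after a reset. */
static enum init_level pick_reinit_level(int reset_on_init)
{
        return reset_on_init ? LVL_DEFAULT : LVL_RESET_RECOVERY;
}

/* Once re-init and the IB ring tests succeed, the device is promoted
 * back to the default level, matching the amdgpu_set_init_level(...,
 * AMDGPU_INIT_LEVEL_DEFAULT) call in the success path above. */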
*device_list_handle, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_device *tmp_adev = NULL; + bool need_full_reset, skip_hw_reset; + int r = 0; + + /* Try reset handler method first */ + tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, + reset_list); + + reset_context->reset_device_list = device_list_handle; + r = amdgpu_reset_perform_reset(tmp_adev, reset_context); + /* If reset handler not implemented, continue; otherwise return */ + if (r == -EOPNOTSUPP) + r = 0; + else + return r; + + /* Reset handler not implemented, use the default method */ + need_full_reset = + test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); + skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); + + /* + * ASIC reset has to be done on all XGMI hive nodes ASAP + * to allow proper links negotiation in FW (within 1 sec) + */ + if (!skip_hw_reset && need_full_reset) { + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + /* For XGMI run all resets in parallel to speed up the process */ + if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { + if (!queue_work(system_unbound_wq, + &tmp_adev->xgmi_reset_work)) + r = -EALREADY; + } else + r = amdgpu_asic_reset(tmp_adev); + + if (r) { + dev_err(tmp_adev->dev, + "ASIC reset failed with error, %d for drm dev, %s", + r, adev_to_drm(tmp_adev)->unique); + goto out; + } + } + + /* For XGMI wait for all resets to complete before proceed */ + if (!r) { + list_for_each_entry(tmp_adev, device_list_handle, + reset_list) { + if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { + flush_work(&tmp_adev->xgmi_reset_work); + r = tmp_adev->asic_reset_res; + if (r) + break; + } + } + } + } + + if (!r && amdgpu_ras_intr_triggered()) { + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + amdgpu_ras_reset_error_count(tmp_adev, + AMDGPU_RAS_BLOCK__MMHUB); + } + + amdgpu_ras_intr_cleared(); + } + + r = amdgpu_device_reinit_after_reset(reset_context); + if (r == -EAGAIN) set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); else clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); + +out: return r; } @@ -5734,7 +5905,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, */ amdgpu_unregister_gpu_instance(tmp_adev); - drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true); + drm_client_dev_suspend(adev_to_drm(tmp_adev), false); /* disable ras on ALL IPs */ if (!need_emergency_restart && @@ -6092,6 +6263,9 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, bool p2p_access = !adev->gmc.xgmi.connected_to_cpu && !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0); + if (!p2p_access) + dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n", + pci_name(peer_adev->pdev)); bool is_large_bar = adev->gmc.visible_vram_size && adev->gmc.real_vram_size == adev->gmc.visible_vram_size; @@ -6344,6 +6518,9 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev) struct amdgpu_device *adev = drm_to_adev(dev); int r; + if (amdgpu_sriov_vf(adev)) + return false; + r = pci_save_state(pdev); if (!r) { kfree(adev->pci_state); @@ -6604,3 +6781,47 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, } return ret; } + +ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring) +{ + ssize_t size = 0; + + if (!ring || !ring->adev) + return size; + + if (amdgpu_device_should_recover_gpu(ring->adev)) + size |= AMDGPU_RESET_TYPE_FULL; + + if (unlikely(!ring->adev->debug_disable_soft_recovery) && + 
!amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery) + size |= AMDGPU_RESET_TYPE_SOFT_RESET; + + return size; +} + +ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset) +{ + ssize_t size = 0; + + if (supported_reset == 0) { + size += sysfs_emit_at(buf, size, "unsupported"); + size += sysfs_emit_at(buf, size, "\n"); + return size; + + } + + if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET) + size += sysfs_emit_at(buf, size, "soft "); + + if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE) + size += sysfs_emit_at(buf, size, "queue "); + + if (supported_reset & AMDGPU_RESET_TYPE_PER_PIPE) + size += sysfs_emit_at(buf, size, "pipe "); + + if (supported_reset & AMDGPU_RESET_TYPE_FULL) + size += sysfs_emit_at(buf, size, "full "); + + size += sysfs_emit_at(buf, size, "\n"); + return size; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 4bd61c169ca8..1040204ac8b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1723,45 +1723,85 @@ union nps_info { struct nps_info_v1_0 v1; }; +static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev, + union nps_info *nps_data) +{ + uint64_t vram_size, pos, offset; + struct nps_info_header *nhdr; + struct binary_header bhdr; + uint16_t checksum; + + vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; + pos = vram_size - DISCOVERY_TMR_OFFSET; + amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false); + + offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset); + checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum); + + amdgpu_device_vram_access(adev, (pos + offset), nps_data, + sizeof(*nps_data), false); + + nhdr = (struct nps_info_header *)(nps_data); + if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data, + le32_to_cpu(nhdr->size_bytes), + checksum)) { + dev_err(adev->dev, "nps data refresh, checksum mismatch\n"); + return -EINVAL; + } + + return 0; +} + int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, uint32_t *nps_type, struct amdgpu_gmc_memrange **ranges, - int *range_cnt) + int *range_cnt, bool refresh) { struct amdgpu_gmc_memrange *mem_ranges; struct binary_header *bhdr; union nps_info *nps_info; + union nps_info nps_data; u16 offset; - int i; + int i, r; if (!nps_type || !range_cnt || !ranges) return -EINVAL; - if (!adev->mman.discovery_bin) { - dev_err(adev->dev, - "fetch mem range failed, ip discovery uninitialized\n"); - return -EINVAL; - } + if (refresh) { + r = amdgpu_discovery_refresh_nps_info(adev, &nps_data); + if (r) + return r; + nps_info = &nps_data; + } else { + if (!adev->mman.discovery_bin) { + dev_err(adev->dev, + "fetch mem range failed, ip discovery uninitialized\n"); + return -EINVAL; + } - bhdr = (struct binary_header *)adev->mman.discovery_bin; - offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset); + bhdr = (struct binary_header *)adev->mman.discovery_bin; + offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset); - if (!offset) - return -ENOENT; + if (!offset) + return -ENOENT; - /* If verification fails, return as if NPS table doesn't exist */ - if (amdgpu_discovery_verify_npsinfo(adev, bhdr)) - return -ENOENT; + /* If verification fails, return as if NPS table doesn't exist */ + if (amdgpu_discovery_verify_npsinfo(adev, bhdr)) + return -ENOENT; - nps_info = (union nps_info *)(adev->mman.discovery_bin + offset); + nps_info = + (union nps_info *)(adev->mman.discovery_bin + offset); + } switch 
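amdgpu_show_reset_mask() above turns a capability bitmask into the space-separated keyword list that the new *_reset_mask sysfs files expose. The same decoding in plain, runnable C (the bit assignments are illustrative; the real AMDGPU_RESET_TYPE_* constants live in the driver):

#include <stdio.h>

#define RESET_SOFT      (1u << 0)       /* illustrative bit values */
#define RESET_QUEUE     (1u << 1)
#define RESET_PIPE      (1u << 2)
#define RESET_FULL      (1u << 3)

/* Decode a supported-reset mask the way the sysfs show helper does:
 * "unsupported" for an empty mask, otherwise one keyword per set bit. */
static int show_reset_mask(char *buf, size_t len, unsigned int mask)
{
        int n = 0;

        if (!mask)
                return snprintf(buf, len, "unsupported\n");
        if (mask & RESET_SOFT)
                n += snprintf(buf + n, len - n, "soft ");
        if (mask & RESET_QUEUE)
                n += snprintf(buf + n, len - n, "queue ");
        if (mask & RESET_PIPE)
                n += snprintf(buf + n, len - n, "pipe ");
        if (mask & RESET_FULL)
                n += snprintf(buf + n, len - n, "full ");
        n += snprintf(buf + n, len - n, "\n");
        return n;
}

int main(void)
{
        char buf[64];

        show_reset_mask(buf, sizeof(buf), RESET_SOFT | RESET_FULL);
        fputs(buf, stdout);     /* prints "soft full " and a newline */
        return 0;
}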
(le16_to_cpu(nps_info->v1.header.version_major)) { case 1: + mem_ranges = kvcalloc(nps_info->v1.count, + sizeof(*mem_ranges), + GFP_KERNEL); + if (!mem_ranges) + return -ENOMEM; *nps_type = nps_info->v1.nps_type; *range_cnt = nps_info->v1.count; - mem_ranges = kvzalloc( - *range_cnt * sizeof(struct amdgpu_gmc_memrange), - GFP_KERNEL); for (i = 0; i < *range_cnt; i++) { mem_ranges[i].base_address = nps_info->v1.instance_info[i].base_address; @@ -2492,6 +2532,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2); adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1); adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1); + adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0); } else { adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0); @@ -2508,6 +2549,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0); adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0); adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0); + adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0); } break; case CHIP_VEGA20: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index f5d36525ec3e..b44d56465c5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -33,6 +33,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev); int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, uint32_t *nps_type, struct amdgpu_gmc_memrange **ranges, - int *range_cnt); + int *range_cnt, bool refresh); #endif /* __AMDGPU_DISCOVERY__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 7273c98c3963..38686203bea6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -232,8 +232,6 @@ int amdgpu_wbrf = -1; int amdgpu_damage_clips = -1; /* auto */ int amdgpu_umsch_mm_fwlog; -static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); - DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0, "DRM_UT_CORE", "DRM_UT_DRIVER", @@ -248,9 +246,6 @@ DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0, struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), - .delayed_reset_work = __DELAYED_WORK_INITIALIZER( - mgpu_info.delayed_reset_work, - amdgpu_drv_delayed_reset_work_handler, 0), }; int amdgpu_ras_enable = -1; uint amdgpu_ras_mask = 0xffffffff; @@ -893,7 +888,7 @@ module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444); * the ABM algorithm, with 1 being the least reduction and 4 being the most * reduction. * - * Defaults to -1, or disabled. Userspace can only override this level after + * Defaults to -1, or auto. Userspace can only override this level after * boot if it's set to auto. */ int amdgpu_dm_abm_level = -1; @@ -2439,6 +2434,7 @@ amdgpu_pci_remove(struct pci_dev *pdev) struct amdgpu_device *adev = drm_to_adev(dev); amdgpu_xcp_dev_unplug(adev); + amdgpu_gmc_prepare_nps_mode_change(adev); drm_dev_unplug(dev); if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) { @@ -2477,82 +2473,6 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) adev->mp1_state = PP_MP1_STATE_NONE; } -/** - * amdgpu_drv_delayed_reset_work_handler - work handler for reset - * - * @work: work_struct. 
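One detail of the NPS-range hunk above is the switch from kvzalloc(count * size, ...) to kvcalloc(count, size, ...), with the allocation moved ahead of the writes to *nps_type and *range_cnt and a previously missing NULL check added. calloc-style allocators reject a count * size product that overflows, where the open-coded multiplication would silently wrap. A userspace analogue of the failure mode:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        size_t count = SIZE_MAX / 2 + 2, size = 2;

        /* count * size wraps around to 2, so this "succeeds" with a
         * buffer far too small for the requested element count */
        void *bad = malloc(count * size);

        /* calloc() checks the multiplication and fails cleanly instead */
        void *good = calloc(count, size);

        printf("malloc: %p, calloc: %p\n", bad, good);
        free(bad);
        free(good);
        return 0;
}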
- */ -static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) -{ - struct list_head device_list; - struct amdgpu_device *adev; - int i, r; - struct amdgpu_reset_context reset_context; - - memset(&reset_context, 0, sizeof(reset_context)); - - mutex_lock(&mgpu_info.mutex); - if (mgpu_info.pending_reset == true) { - mutex_unlock(&mgpu_info.mutex); - return; - } - mgpu_info.pending_reset = true; - mutex_unlock(&mgpu_info.mutex); - - /* Use a common context, just need to make sure full reset is done */ - reset_context.method = AMD_RESET_METHOD_NONE; - set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - - for (i = 0; i < mgpu_info.num_dgpu; i++) { - adev = mgpu_info.gpu_ins[i].adev; - reset_context.reset_req_dev = adev; - r = amdgpu_device_pre_asic_reset(adev, &reset_context); - if (r) { - dev_err(adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", - r, adev_to_drm(adev)->unique); - } - if (!queue_work(system_unbound_wq, &adev->xgmi_reset_work)) - r = -EALREADY; - } - for (i = 0; i < mgpu_info.num_dgpu; i++) { - adev = mgpu_info.gpu_ins[i].adev; - flush_work(&adev->xgmi_reset_work); - adev->gmc.xgmi.pending_reset = false; - } - - /* reset function will rebuild the xgmi hive info , clear it now */ - for (i = 0; i < mgpu_info.num_dgpu; i++) - amdgpu_xgmi_remove_device(mgpu_info.gpu_ins[i].adev); - - INIT_LIST_HEAD(&device_list); - - for (i = 0; i < mgpu_info.num_dgpu; i++) - list_add_tail(&mgpu_info.gpu_ins[i].adev->reset_list, &device_list); - - /* unregister the GPU first, reset function will add them back */ - list_for_each_entry(adev, &device_list, reset_list) - amdgpu_unregister_gpu_instance(adev); - - /* Use a common context, just need to make sure full reset is done */ - set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); - set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); - r = amdgpu_do_asic_reset(&device_list, &reset_context); - - if (r) { - DRM_ERROR("reinit gpus failure"); - return; - } - for (i = 0; i < mgpu_info.num_dgpu; i++) { - adev = mgpu_info.gpu_ins[i].adev; - if (!adev->kfd.init_complete) { - kgd2kfd_init_zone_device(adev); - amdgpu_amdkfd_device_init(adev); - amdgpu_amdkfd_drm_client_create(adev); - } - amdgpu_ttm_set_buffer_funcs_status(adev, true); - } -} - static int amdgpu_pmops_prepare(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); @@ -2585,7 +2505,6 @@ static int amdgpu_pmops_suspend(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); - adev->suspend_complete = false; if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = true; else if (amdgpu_acpi_is_s3_active(adev)) @@ -2600,7 +2519,6 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); - adev->suspend_complete = true; if (amdgpu_acpi_should_gpu_reset(adev)) return amdgpu_asic_reset(adev); @@ -3075,6 +2993,12 @@ static int __init amdgpu_init(void) /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. 
*/ amdgpu_amdkfd_init(); + if (amdgpu_pp_feature_mask & PP_OVERDRIVE_MASK) { + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + pr_crit("Overdrive is enabled, please disable it before " + "reporting any bugs unrelated to overdrive.\n"); + } + /* let modprobe override vga console setting */ return pci_register_driver(&amdgpu_kms_pci_driver); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c index 35fee3e8cde2..8cd69836dd99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c @@ -200,7 +200,7 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr, dev_err_ratelimited(&i2c_adap->dev, "maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d", eeprom_addr, buf_size, - read ? "read" : "write", EEPROM_OFFSET_SIZE); + str_read_write(read), EEPROM_OFFSET_SIZE); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c index c7df7fa3459f..df2cf5c33925 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -33,6 +33,7 @@ #include <drm/amdgpu_drm.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> +#include <drm/drm_file.h> #include "amdgpu.h" #include "amdgpu_vm.h" @@ -59,18 +60,25 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) struct amdgpu_fpriv *fpriv = file->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; - struct amdgpu_mem_stats stats; + struct amdgpu_mem_stats stats[__AMDGPU_PL_LAST + 1] = { }; ktime_t usage[AMDGPU_HW_IP_NUM]; - unsigned int hw_ip; + const char *pl_name[] = { + [TTM_PL_VRAM] = "vram", + [TTM_PL_TT] = "gtt", + [TTM_PL_SYSTEM] = "cpu", + [AMDGPU_PL_GDS] = "gds", + [AMDGPU_PL_GWS] = "gws", + [AMDGPU_PL_OA] = "oa", + [AMDGPU_PL_DOORBELL] = "doorbell", + }; + unsigned int hw_ip, i; int ret; - memset(&stats, 0, sizeof(stats)); - ret = amdgpu_bo_reserve(vm->root.bo, false); if (ret) return; - amdgpu_vm_get_memory(vm, &stats); + amdgpu_vm_get_memory(vm, stats, ARRAY_SIZE(stats)); amdgpu_bo_unreserve(vm->root.bo); amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage); @@ -82,24 +90,33 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) */ drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid); - drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL); - drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL); - drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL); - drm_printf(p, "amd-memory-visible-vram:\t%llu KiB\n", - stats.visible_vram/1024UL); + + for (i = 0; i < ARRAY_SIZE(pl_name); i++) { + if (!pl_name[i]) + continue; + + drm_print_memory_stats(p, + &stats[i].drm, + DRM_GEM_OBJECT_RESIDENT | + DRM_GEM_OBJECT_PURGEABLE, + pl_name[i]); + } + + /* Legacy amdgpu keys, alias to drm-resident-memory-: */ + drm_printf(p, "drm-memory-vram:\t%llu KiB\n", + stats[TTM_PL_VRAM].drm.resident/1024UL); + drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", + stats[TTM_PL_TT].drm.resident/1024UL); + drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", + stats[TTM_PL_SYSTEM].drm.resident/1024UL); + + /* Amdgpu specific memory accounting keys: */ drm_printf(p, "amd-evicted-vram:\t%llu KiB\n", - stats.evicted_vram/1024UL); - drm_printf(p, "amd-evicted-visible-vram:\t%llu KiB\n", - stats.evicted_visible_vram/1024UL); + stats[TTM_PL_VRAM].evicted/1024UL); drm_printf(p, "amd-requested-vram:\t%llu KiB\n", - stats.requested_vram/1024UL); - drm_printf(p, "amd-requested-visible-vram:\t%llu KiB\n", - 
stats.requested_visible_vram/1024UL); + stats[TTM_PL_VRAM].requested/1024UL); drm_printf(p, "amd-requested-gtt:\t%llu KiB\n", - stats.requested_gtt/1024UL); - drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL); - drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL); - drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL); + stats[TTM_PL_TT].requested/1024UL); for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) { if (!usage[hw_ip]) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 256b95232de5..b2033f8352f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -78,8 +78,9 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev) if (adev->dummy_page_addr) return 0; - adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0, - PAGE_SIZE, DMA_BIDIRECTIONAL); + adev->dummy_page_addr = dma_map_page_attrs(&adev->pdev->dev, dummy_page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) { dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n"); adev->dummy_page_addr = 0; @@ -99,8 +100,9 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev) { if (!adev->dummy_page_addr) return; - dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page_attrs(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); adev->dummy_page_addr = 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 83e54697f0ee..69a6b6dba0a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -87,16 +87,6 @@ int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, return bit; } -void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, - int *me, int *pipe, int *queue) -{ - *queue = bit % adev->gfx.me.num_queue_per_pipe; - *pipe = (bit / adev->gfx.me.num_queue_per_pipe) - % adev->gfx.me.num_pipe_per_me; - *me = (bit / adev->gfx.me.num_queue_per_pipe) - / adev->gfx.me.num_pipe_per_me; -} - bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue) { @@ -415,7 +405,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, } /* prepare MQD backup */ - kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL); + kiq->mqd_backup = kzalloc(mqd_size, GFP_KERNEL); if (!kiq->mqd_backup) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); @@ -438,7 +428,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, ring->mqd_size = mqd_size; /* prepare MQD backup */ - adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL); + adev->gfx.me.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL); if (!adev->gfx.me.mqd_backup[i]) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); return -ENOMEM; @@ -462,7 +452,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, ring->mqd_size = mqd_size; /* prepare MQD backup */ - adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL); + adev->gfx.mec.mqd_backup[j] = kzalloc(mqd_size, GFP_KERNEL); if (!adev->gfx.mec.mqd_backup[j]) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); return -ENOMEM; @@ -525,6 +515,9 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 
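The fdinfo rework above replaces a handful of hand-rolled counters with one drm_memory_stats entry per TTM placement, printed through drm_print_memory_stats(), while the legacy amdgpu keys stay as aliases of the per-placement resident sizes. A compact userspace model of the resulting output (key names follow the patch; sizes are bytes):

#include <stdio.h>

enum placement { PL_VRAM, PL_TT, PL_SYSTEM, PL_COUNT };

struct mem_stats {
        unsigned long long resident, requested, evicted;
};

static void show_fdinfo(const struct mem_stats s[PL_COUNT])
{
        static const char * const name[PL_COUNT] = { "vram", "gtt", "cpu" };

        /* generic DRM keys, one block per memory region */
        for (int i = 0; i < PL_COUNT; i++)
                printf("drm-resident-memory-%s:\t%llu KiB\n",
                       name[i], s[i].resident / 1024);

        /* legacy amdgpu keys, kept as aliases for older userspace */
        printf("drm-memory-vram:\t%llu KiB\n", s[PL_VRAM].resident / 1024);
        printf("amd-evicted-vram:\t%llu KiB\n", s[PL_VRAM].evicted / 1024);
        printf("amd-requested-vram:\t%llu KiB\n", s[PL_VRAM].requested / 1024);
        printf("amd-requested-gtt:\t%llu KiB\n", s[PL_TT].requested / 1024);
}

int main(void)
{
        struct mem_stats s[PL_COUNT] = {
                [PL_VRAM] = { .resident = 256 << 20, .requested = 300 << 20 },
        };

        show_fdinfo(s);
        return 0;
}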
return -EINVAL; + if (!kiq_ring->sched.ready || adev->job_hang || amdgpu_in_reset(adev)) + return 0; + spin_lock(&kiq->ring_lock); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * adev->gfx.num_compute_rings)) { @@ -538,20 +531,15 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) &adev->gfx.compute_ring[j], RESET_QUEUES, 0, 0); } - - /** - * This is workaround: only skip kiq_ring test - * during ras recovery in suspend stage for gfx9.4.3 + /* Submit unmap queue packet */ + amdgpu_ring_commit(kiq_ring); + /* + * Ring test will do a basic scratch register change check. Just run + * this to ensure that unmap queues that is submitted before got + * processed successfully before returning. */ - if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || - amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) && - amdgpu_ras_in_recovery(adev)) { - spin_unlock(&kiq->ring_lock); - return 0; - } + r = amdgpu_ring_test_helper(kiq_ring); - if (kiq_ring->sched.ready && !adev->job_hang) - r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); return r; @@ -579,8 +567,11 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id) if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; - spin_lock(&kiq->ring_lock); + if (!adev->gfx.kiq[0].ring.sched.ready || adev->job_hang) + return 0; + if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { + spin_lock(&kiq->ring_lock); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * adev->gfx.num_gfx_rings)) { spin_unlock(&kiq->ring_lock); @@ -593,11 +584,17 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id) &adev->gfx.gfx_ring[j], PREEMPT_QUEUES, 0, 0); } - } + /* Submit unmap queue packet */ + amdgpu_ring_commit(kiq_ring); - if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang) + /* + * Ring test will do a basic scratch register change check. + * Just run this to ensure that unmap queues that is submitted + * before got processed successfully before returning. + */ r = amdgpu_ring_test_helper(kiq_ring); - spin_unlock(&kiq->ring_lock); + spin_unlock(&kiq->ring_lock); + } return r; } @@ -702,7 +699,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[j]); } - + /* Submit map queue packet */ + amdgpu_ring_commit(kiq_ring); + /* + * Ring test will do a basic scratch register change check. Just run + * this to ensure that map queues that is submitted before got + * processed successfully before returning. + */ r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); if (r) @@ -753,7 +756,13 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) &adev->gfx.gfx_ring[j]); } } - + /* Submit map queue packet */ + amdgpu_ring_commit(kiq_ring); + /* + * Ring test will do a basic scratch register change check. Just run + * this to ensure that map queues that is submitted before got + * processed successfully before returning. 
+ */ r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); if (r) @@ -895,6 +904,9 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r if (r) return r; + if (amdgpu_sriov_vf(adev)) + return r; + if (adev->gfx.cp_ecc_error_irq.funcs) { r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); if (r) @@ -1363,35 +1375,35 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, return count; } +static const char *xcp_desc[] = { + [AMDGPU_SPX_PARTITION_MODE] = "SPX", + [AMDGPU_DPX_PARTITION_MODE] = "DPX", + [AMDGPU_TPX_PARTITION_MODE] = "TPX", + [AMDGPU_QPX_PARTITION_MODE] = "QPX", + [AMDGPU_CPX_PARTITION_MODE] = "CPX", +}; + static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev, struct device_attribute *addr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - char *supported_partition; + struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; + int size = 0, mode; + char *sep = ""; - /* TBD */ - switch (NUM_XCC(adev->gfx.xcc_mask)) { - case 8: - supported_partition = "SPX, DPX, QPX, CPX"; - break; - case 6: - supported_partition = "SPX, TPX, CPX"; - break; - case 4: - supported_partition = "SPX, DPX, CPX"; - break; - /* this seems only existing in emulation phase */ - case 2: - supported_partition = "SPX, CPX"; - break; - default: - supported_partition = "Not supported"; - break; + if (!xcp_mgr || !xcp_mgr->avail_xcp_modes) + return sysfs_emit(buf, "Not supported\n"); + + for_each_inst(mode, xcp_mgr->avail_xcp_modes) { + size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]); + sep = ", "; } - return sysfs_emit(buf, "%s\n", supported_partition); + size += sysfs_emit_at(buf, size, "\n"); + + return size; } static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring) @@ -1586,9 +1598,11 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev, if (adev->enforce_isolation[i] && !partition_values[i]) { /* Going from enabled to disabled */ amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i)); + amdgpu_mes_set_enforce_isolation(adev, i, false); } else if (!adev->enforce_isolation[i] && partition_values[i]) { /* Going from disabled to enabled */ amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i)); + amdgpu_mes_set_enforce_isolation(adev, i, true); } adev->enforce_isolation[i] = partition_values[i]; } @@ -1598,6 +1612,32 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev, return count; } +static ssize_t amdgpu_gfx_get_gfx_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset); +} + +static ssize_t amdgpu_gfx_get_compute_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset); +} + static DEVICE_ATTR(run_cleaner_shader, 0200, NULL, amdgpu_gfx_set_run_cleaner_shader); @@ -1611,48 +1651,138 @@ static DEVICE_ATTR(current_compute_partition, 0644, static DEVICE_ATTR(available_compute_partition, 0444, amdgpu_gfx_get_available_compute_partition, NULL); +static DEVICE_ATTR(gfx_reset_mask, 0444, + amdgpu_gfx_get_gfx_reset_mask, NULL); -int amdgpu_gfx_sysfs_init(struct amdgpu_device 
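The available_compute_partition rework replaces the hardcoded per-XCC strings with a walk over the avail_xcp_modes bitmask, printing the name of each set bit. The designated initializers above suggest the partition-mode enum values double as bit positions; under that assumption, the set-bit walk looks like this in standalone C (__builtin_ctz is a GCC/Clang builtin):

#include <stdio.h>

static const char * const xcp_desc[] = { "SPX", "DPX", "TPX", "QPX", "CPX" };

/* Print a comma-separated mode list from a bitmask of supported modes,
 * mirroring the for_each_inst() loop in the sysfs show function. */
static void print_xcp_modes(unsigned int avail_modes)
{
        const char *sep = "";

        for (unsigned int m = avail_modes; m; m &= m - 1) {     /* clear lowest set bit */
                unsigned int mode = __builtin_ctz(m);

                if (mode < sizeof(xcp_desc) / sizeof(xcp_desc[0])) {
                        printf("%s%s", sep, xcp_desc[mode]);
                        sep = ", ";
                }
        }
        printf("\n");
}

int main(void)
{
        print_xcp_modes(0x15);  /* bits 0, 2, 4: prints "SPX, TPX, CPX" */
        return 0;
}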
*adev) +static DEVICE_ATTR(compute_reset_mask, 0444, + amdgpu_gfx_get_compute_reset_mask, NULL); + +static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev) { + struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; + bool xcp_switch_supported; int r; + if (!xcp_mgr) + return 0; + + xcp_switch_supported = + (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode); + + if (!xcp_switch_supported) + dev_attr_current_compute_partition.attr.mode &= + ~(S_IWUSR | S_IWGRP | S_IWOTH); + r = device_create_file(adev->dev, &dev_attr_current_compute_partition); if (r) return r; - r = device_create_file(adev->dev, &dev_attr_available_compute_partition); + if (xcp_switch_supported) + r = device_create_file(adev->dev, + &dev_attr_available_compute_partition); return r; } -void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev) +static void amdgpu_gfx_sysfs_xcp_fini(struct amdgpu_device *adev) { + struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; + bool xcp_switch_supported; + + if (!xcp_mgr) + return; + + xcp_switch_supported = + (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode); device_remove_file(adev->dev, &dev_attr_current_compute_partition); - device_remove_file(adev->dev, &dev_attr_available_compute_partition); + + if (xcp_switch_supported) + device_remove_file(adev->dev, + &dev_attr_available_compute_partition); } -int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev) +static int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev) { int r; - if (!amdgpu_sriov_vf(adev)) { - r = device_create_file(adev->dev, &dev_attr_enforce_isolation); + r = device_create_file(adev->dev, &dev_attr_enforce_isolation); + if (r) + return r; + if (adev->gfx.enable_cleaner_shader) + r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader); + + return r; +} + +static void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev) +{ + device_remove_file(adev->dev, &dev_attr_enforce_isolation); + if (adev->gfx.enable_cleaner_shader) + device_remove_file(adev->dev, &dev_attr_run_cleaner_shader); +} + +static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev) +{ + int r = 0; + + if (!amdgpu_gpu_recovery) + return r; + + if (adev->gfx.num_gfx_rings) { + r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask); if (r) return r; } - r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader); - if (r) + if (adev->gfx.num_compute_rings) { + r = device_create_file(adev->dev, &dev_attr_compute_reset_mask); + if (r) + return r; + } + + return r; +} + +static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev) +{ + if (!amdgpu_gpu_recovery) + return; + + if (adev->gfx.num_gfx_rings) + device_remove_file(adev->dev, &dev_attr_gfx_reset_mask); + + if (adev->gfx.num_compute_rings) + device_remove_file(adev->dev, &dev_attr_compute_reset_mask); +} + +int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_gfx_sysfs_xcp_init(adev); + if (r) { + dev_err(adev->dev, "failed to create xcp sysfs files"); return r; + } - return 0; + r = amdgpu_gfx_sysfs_isolation_shader_init(adev); + if (r) + dev_err(adev->dev, "failed to create isolation sysfs files"); + + r = amdgpu_gfx_sysfs_reset_mask_init(adev); + if (r) + dev_err(adev->dev, "failed to create reset mask sysfs files"); + + return r; } -void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev) +void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev) { - if (!amdgpu_sriov_vf(adev)) - device_remove_file(adev->dev, &dev_attr_enforce_isolation); - 
device_remove_file(adev->dev, &dev_attr_run_cleaner_shader); + if (adev->dev->kobj.sd) { + amdgpu_gfx_sysfs_xcp_fini(adev); + amdgpu_gfx_sysfs_isolation_shader_fini(adev); + amdgpu_gfx_sysfs_reset_mask_fini(adev); + } } int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev, @@ -1740,7 +1870,7 @@ static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx, if (adev->gfx.kfd_sch_req_count[idx] == 0 && adev->gfx.kfd_sch_inactive[idx]) { schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work, - GFX_SLICE_PERIOD); + msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx])); } } else { if (adev->gfx.kfd_sch_req_count[idx] == 0) { @@ -1795,8 +1925,9 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work) fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]); } if (fences) { + /* we've already had our timeslice, so let's wrap this up */ schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work, - GFX_SLICE_PERIOD); + msecs_to_jiffies(1)); } else { /* Tell KFD to resume the runqueue */ if (adev->kfd.init_complete) { @@ -1809,6 +1940,51 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work) mutex_unlock(&adev->enforce_isolation_mutex); } +static void +amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev, + u32 idx) +{ + unsigned long cjiffies; + bool wait = false; + + mutex_lock(&adev->enforce_isolation_mutex); + if (adev->enforce_isolation[idx]) { + /* set the initial values if nothing is set */ + if (!adev->gfx.enforce_isolation_jiffies[idx]) { + adev->gfx.enforce_isolation_jiffies[idx] = jiffies; + adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; + } + /* Make sure KFD gets a chance to run */ + if (amdgpu_amdkfd_compute_active(adev, idx)) { + cjiffies = jiffies; + if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) { + cjiffies -= adev->gfx.enforce_isolation_jiffies[idx]; + if ((jiffies_to_msecs(cjiffies) >= GFX_SLICE_PERIOD_MS)) { + /* if our time is up, let KGD work drain before scheduling more */ + wait = true; + /* reset the timer period */ + adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; + } else { + /* set the timer period to what's left in our time slice */ + adev->gfx.enforce_isolation_time[idx] = + GFX_SLICE_PERIOD_MS - jiffies_to_msecs(cjiffies); + } + } else { + /* if jiffies wrap around we will just wait a little longer */ + adev->gfx.enforce_isolation_jiffies[idx] = jiffies; + } + } else { + /* if there is no KFD work, then set the full slice period */ + adev->gfx.enforce_isolation_jiffies[idx] = jiffies; + adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; + } + } + mutex_unlock(&adev->enforce_isolation_mutex); + + if (wait) + msleep(GFX_SLICE_PERIOD_MS); +} + void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -1825,6 +2001,9 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring) if (idx >= MAX_XCP) return; + /* Don't submit more work until KFD has had some time */ + amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx); + mutex_lock(&adev->enforce_isolation_mutex); if (adev->enforce_isolation[idx]) { if (adev->kfd.init_complete) @@ -1856,3 +2035,144 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring) } mutex_unlock(&adev->enforce_isolation_mutex); } + +/* + * debugfs for to enable/disable gfx job submission to specific core. 
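The enforce-isolation changes replace the fixed reschedule period with per-partition timeslice bookkeeping: the driver records when the current slice started and either shrinks the next scheduling period to whatever is left, or, once the slice is used up, makes the GFX side wait so KFD work can drain. The jiffies arithmetic reduces to the following standalone model in plain milliseconds (the 250 ms slice length is illustrative; the driver defines GFX_SLICE_PERIOD_MS):

#include <stdbool.h>

#define SLICE_MS 250    /* illustrative slice length */

struct slice_state {
        unsigned long start_ms;         /* when the current slice began */
        unsigned long period_ms;        /* next rescheduling delay */
};

/* Returns true when the caller should wait before submitting more KGD
 * work, mirroring amdgpu_gfx_enforce_isolation_wait_for_kfd(). */
static bool kfd_slice_tick(struct slice_state *s, unsigned long now_ms,
                           bool kfd_active)
{
        if (!kfd_active || now_ms < s->start_ms) {
                /* no KFD work, or the clock wrapped: restart a full slice */
                s->start_ms = now_ms;
                s->period_ms = SLICE_MS;
                return false;
        }
        if (now_ms - s->start_ms >= SLICE_MS) {
                /* slice exhausted: wait out a full period before more work */
                s->period_ms = SLICE_MS;
                return true;
        }
        /* otherwise only the remainder of the slice is left */
        s->period_ms = SLICE_MS - (now_ms - s->start_ms);
        return false;
}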
+ */ +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + + mask = (1 << adev->gfx.num_gfx_rings) - 1; + if ((val & mask) == 0) + return -EINVAL; + + for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { + ring = &adev->gfx.gfx_ring[i]; + if (val & (1 << i)) + ring->sched.ready = true; + else + ring->sched.ready = false; + } + /* publish sched.ready flag update effective immediately across smp */ + smp_rmb(); + return 0; +} + +static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { + ring = &adev->gfx.gfx_ring[i]; + if (ring->sched.ready) + mask |= 1 << i; + } + + *val = mask; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gfx_sched_mask_fops, + amdgpu_debugfs_gfx_sched_mask_get, + amdgpu_debugfs_gfx_sched_mask_set, "%llx\n"); + +#endif + +void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + char name[32]; + + if (!(adev->gfx.num_gfx_rings > 1)) + return; + sprintf(name, "amdgpu_gfx_sched_mask"); + debugfs_create_file(name, 0600, root, adev, + &amdgpu_debugfs_gfx_sched_mask_fops); +#endif +} + +/* + * debugfs for to enable/disable compute job submission to specific core. + */ +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + + mask = (1 << adev->gfx.num_compute_rings) - 1; + if ((val & mask) == 0) + return -EINVAL; + + for (i = 0; i < adev->gfx.num_compute_rings; ++i) { + ring = &adev->gfx.compute_ring[i]; + if (val & (1 << i)) + ring->sched.ready = true; + else + ring->sched.ready = false; + } + + /* publish sched.ready flag update effective immediately across smp */ + smp_rmb(); + return 0; +} + +static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + for (i = 0; i < adev->gfx.num_compute_rings; ++i) { + ring = &adev->gfx.compute_ring[i]; + if (ring->sched.ready) + mask |= 1 << i; + } + + *val = mask; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_compute_sched_mask_fops, + amdgpu_debugfs_compute_sched_mask_get, + amdgpu_debugfs_compute_sched_mask_set, "%llx\n"); + +#endif + +void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + char name[32]; + + if (!(adev->gfx.num_compute_rings > 1)) + return; + sprintf(name, "amdgpu_compute_sched_mask"); + debugfs_create_file(name, 0600, root, adev, + &amdgpu_debugfs_compute_sched_mask_fops); +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 5644e10a86a9..8b5bd63b5773 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -424,6 +424,8 @@ struct amdgpu_gfx { /* reset 
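Both debugfs attributes follow the same contract: reads report a bitmask of rings whose scheduler is enabled, and writes apply a new mask, except that a write which would disable every ring is rejected with -EINVAL. The core of the set path, modeled as standalone C:

#include <stdbool.h>
#include <stdint.h>

/* Apply a ring-enable bitmask: bit i drives ring i's sched.ready flag.
 * Mirrors amdgpu_debugfs_gfx_sched_mask_set(). */
static int apply_sched_mask(bool *ready, unsigned int num_rings, uint64_t val)
{
        uint64_t valid = (1ULL << num_rings) - 1;

        if ((val & valid) == 0)
                return -1;      /* -EINVAL in the driver: at least one ring must stay on */

        for (unsigned int i = 0; i < num_rings; i++)
                ready[i] = !!(val & (1ULL << i));
        return 0;
}

/* e.g. echo 0x1 > /sys/kernel/debug/dri/0/amdgpu_gfx_sched_mask would
 * leave only gfx ring 0 schedulable (exact debugfs path assumed). */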
mask */ uint32_t grbm_soft_reset; uint32_t srbm_soft_reset; + uint32_t gfx_supported_reset; + uint32_t compute_supported_reset; /* gfx off */ bool gfx_off_state; /* true: enabled, false: disabled */ @@ -472,6 +474,8 @@ struct amdgpu_gfx { struct mutex kfd_sch_mutex; u64 kfd_sch_req_count[MAX_XCP]; bool kfd_sch_inactive[MAX_XCP]; + unsigned long enforce_isolation_jiffies[MAX_XCP]; + unsigned long enforce_isolation_time[MAX_XCP]; }; struct amdgpu_gfx_ras_reg_entry { @@ -540,8 +544,6 @@ bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring); int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me, int pipe, int queue); -void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, - int *me, int *pipe, int *queue); bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); @@ -579,11 +581,11 @@ void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev); void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev, unsigned int cleaner_shader_size, const void *cleaner_shader_ptr); -int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev); -void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev); void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work); void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring); void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring); +void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev); +void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev); static inline const char *amdgpu_gfx_compute_mode_desc(int mode) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 17a19d49d30a..1c19a65e6553 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -1065,18 +1065,6 @@ uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo) return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo)); } -/** - * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address - * from CPU's view - * - * @adev: amdgpu_device pointer - * @bo: amdgpu buffer object - */ -uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo) -{ - return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base; -} - int amdgpu_gmc_vram_checking(struct amdgpu_device *adev) { struct amdgpu_bo *vram_bo = NULL; @@ -1130,6 +1118,79 @@ release_buffer: return ret; } +static const char *nps_desc[] = { + [AMDGPU_NPS1_PARTITION_MODE] = "NPS1", + [AMDGPU_NPS2_PARTITION_MODE] = "NPS2", + [AMDGPU_NPS3_PARTITION_MODE] = "NPS3", + [AMDGPU_NPS4_PARTITION_MODE] = "NPS4", + [AMDGPU_NPS6_PARTITION_MODE] = "NPS6", + [AMDGPU_NPS8_PARTITION_MODE] = "NPS8", +}; + +static ssize_t available_memory_partition_show(struct device *dev, + struct device_attribute *addr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + int size = 0, mode; + char *sep = ""; + + for_each_inst(mode, adev->gmc.supported_nps_modes) { + size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]); + sep = ", "; + } + size += sysfs_emit_at(buf, size, "\n"); + + return size; +} + +static ssize_t current_memory_partition_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct drm_device *ddev = 
dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + enum amdgpu_memory_partition mode; + struct amdgpu_hive_info *hive; + int i; + + mode = UNKNOWN_MEMORY_PARTITION_MODE; + for_each_inst(i, adev->gmc.supported_nps_modes) { + if (!strncasecmp(nps_desc[i], buf, strlen(nps_desc[i]))) { + mode = i; + break; + } + } + + if (mode == UNKNOWN_MEMORY_PARTITION_MODE) + return -EINVAL; + + if (mode == adev->gmc.gmc_funcs->query_mem_partition_mode(adev)) { + dev_info( + adev->dev, + "requested NPS mode is same as current NPS mode, skipping\n"); + return count; + } + + /* If device is part of hive, all devices in the hive should request the + * same mode. Hence store the requested mode in hive. + */ + hive = amdgpu_get_xgmi_hive(adev); + if (hive) { + atomic_set(&hive->requested_nps_mode, mode); + amdgpu_put_xgmi_hive(hive); + } else { + adev->gmc.requested_nps_mode = mode; + } + + dev_info( + adev->dev, + "NPS mode change requested, please remove and reload the driver\n"); + + return count; +} + static ssize_t current_memory_partition_show( struct device *dev, struct device_attribute *addr, char *buf) { @@ -1138,53 +1199,65 @@ static ssize_t current_memory_partition_show( enum amdgpu_memory_partition mode; mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); - switch (mode) { - case AMDGPU_NPS1_PARTITION_MODE: - return sysfs_emit(buf, "NPS1\n"); - case AMDGPU_NPS2_PARTITION_MODE: - return sysfs_emit(buf, "NPS2\n"); - case AMDGPU_NPS3_PARTITION_MODE: - return sysfs_emit(buf, "NPS3\n"); - case AMDGPU_NPS4_PARTITION_MODE: - return sysfs_emit(buf, "NPS4\n"); - case AMDGPU_NPS6_PARTITION_MODE: - return sysfs_emit(buf, "NPS6\n"); - case AMDGPU_NPS8_PARTITION_MODE: - return sysfs_emit(buf, "NPS8\n"); - default: + if ((mode >= ARRAY_SIZE(nps_desc)) || + (BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode)) return sysfs_emit(buf, "UNKNOWN\n"); - } + + return sysfs_emit(buf, "%s\n", nps_desc[mode]); } -static DEVICE_ATTR_RO(current_memory_partition); +static DEVICE_ATTR_RW(current_memory_partition); +static DEVICE_ATTR_RO(available_memory_partition); int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev) { + bool nps_switch_support; + int r = 0; + if (!adev->gmc.gmc_funcs->query_mem_partition_mode) return 0; + nps_switch_support = (hweight32(adev->gmc.supported_nps_modes & + AMDGPU_ALL_NPS_MASK) > 1); + if (!nps_switch_support) + dev_attr_current_memory_partition.attr.mode &= + ~(S_IWUSR | S_IWGRP | S_IWOTH); + else + r = device_create_file(adev->dev, + &dev_attr_available_memory_partition); + + if (r) + return r; + return device_create_file(adev->dev, &dev_attr_current_memory_partition); } void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev) { + if (!adev->gmc.gmc_funcs->query_mem_partition_mode) + return; + device_remove_file(adev->dev, &dev_attr_current_memory_partition); + device_remove_file(adev->dev, &dev_attr_available_memory_partition); } int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, struct amdgpu_mem_partition_info *mem_ranges, - int exp_ranges) + uint8_t *exp_ranges) { struct amdgpu_gmc_memrange *ranges; int range_cnt, ret, i, j; uint32_t nps_type; + bool refresh; - if (!mem_ranges) + if (!mem_ranges || !exp_ranges) return -EINVAL; + refresh = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) && + (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS); ret = amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges, - &range_cnt); + &range_cnt, refresh); if (ret) return ret; @@ -1192,16 +1265,16 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, /* 
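current_memory_partition_store() accepts a mode by name, matching the written string case-insensitively against the NPS descriptors. The matching itself is a straightforward table scan; a runnable reduction (the driver additionally restricts the scan to the modes the device supports via for_each_inst):

#include <stdio.h>
#include <string.h>
#include <strings.h>    /* strncasecmp() */

static const char * const nps_desc[] = {
        [1] = "NPS1", [2] = "NPS2", [3] = "NPS3",
        [4] = "NPS4", [6] = "NPS6", [8] = "NPS8",
};

/* Map a sysfs write like "nps4\n" to its mode number, or -1 if unknown
 * (the driver returns -EINVAL for that case). */
static int parse_nps_mode(const char *buf)
{
        for (size_t i = 0; i < sizeof(nps_desc) / sizeof(nps_desc[0]); i++) {
                if (nps_desc[i] &&
                    !strncasecmp(nps_desc[i], buf, strlen(nps_desc[i])))
                        return (int)i;
        }
        return -1;
}

int main(void)
{
        printf("%d\n", parse_nps_mode("nps4\n"));       /* 4 */
        printf("%d\n", parse_nps_mode("NPS5\n"));       /* -1 */
        return 0;
}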
TODO: For now, expect ranges and partition count to be the same. * Adjust if there are holes expected in any NPS domain. */ - if (range_cnt != exp_ranges) { + if (*exp_ranges && (range_cnt != *exp_ranges)) { dev_warn( adev->dev, "NPS config mismatch - expected ranges: %d discovery - nps mode: %d, nps ranges: %d", - exp_ranges, nps_type, range_cnt); + *exp_ranges, nps_type, range_cnt); ret = -EINVAL; goto err; } - for (i = 0; i < exp_ranges; ++i) { + for (i = 0; i < range_cnt; ++i) { if (ranges[i].base_address >= ranges[i].limit_address) { dev_warn( adev->dev, @@ -1242,8 +1315,81 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, ranges[i].limit_address - ranges[i].base_address + 1; } + if (!*exp_ranges) + *exp_ranges = range_cnt; err: kfree(ranges); return ret; } + +int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev, + int nps_mode) +{ + /* Not supported on VF devices and APUs */ + if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) + return -EOPNOTSUPP; + + if (!adev->psp.funcs) { + dev_err(adev->dev, + "PSP interface not available for nps mode change request"); + return -EINVAL; + } + + return psp_memory_partition(&adev->psp, nps_mode); +} + +static inline bool amdgpu_gmc_need_nps_switch_req(struct amdgpu_device *adev, + int req_nps_mode, + int cur_nps_mode) +{ + return (((BIT(req_nps_mode) & adev->gmc.supported_nps_modes) == + BIT(req_nps_mode)) && + req_nps_mode != cur_nps_mode); +} + +void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev) +{ + int req_nps_mode, cur_nps_mode, r; + struct amdgpu_hive_info *hive; + + if (amdgpu_sriov_vf(adev) || !adev->gmc.supported_nps_modes || + !adev->gmc.gmc_funcs->request_mem_partition_mode) + return; + + cur_nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); + hive = amdgpu_get_xgmi_hive(adev); + if (hive) { + req_nps_mode = atomic_read(&hive->requested_nps_mode); + if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode, + cur_nps_mode)) { + amdgpu_put_xgmi_hive(hive); + return; + } + r = amdgpu_xgmi_request_nps_change(adev, hive, req_nps_mode); + amdgpu_put_xgmi_hive(hive); + goto out; + } + + req_nps_mode = adev->gmc.requested_nps_mode; + if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode, cur_nps_mode)) + return; + + /* even if this fails, we should let driver unload w/o blocking */ + r = adev->gmc.gmc_funcs->request_mem_partition_mode(adev, req_nps_mode); +out: + if (r) + dev_err(adev->dev, "NPS mode change request failed\n"); + else + dev_info( + adev->dev, + "NPS mode change request done, reload driver to complete the change\n"); +} + +bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev) +{ + if (adev->gmc.gmc_funcs->need_reset_on_init) + return adev->gmc.gmc_funcs->need_reset_on_init(adev); + + return false; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 4d951a1baefa..459a30fe239f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -73,6 +73,13 @@ enum amdgpu_memory_partition { AMDGPU_NPS8_PARTITION_MODE = 8, }; +#define AMDGPU_ALL_NPS_MASK \ + (BIT(AMDGPU_NPS1_PARTITION_MODE) | BIT(AMDGPU_NPS2_PARTITION_MODE) | \ + BIT(AMDGPU_NPS3_PARTITION_MODE) | BIT(AMDGPU_NPS4_PARTITION_MODE) | \ + BIT(AMDGPU_NPS6_PARTITION_MODE) | BIT(AMDGPU_NPS8_PARTITION_MODE)) + +#define AMDGPU_GMC_INIT_RESET_NPS BIT(0) + /* * GMC page fault information */ @@ -161,6 +168,10 @@ struct amdgpu_gmc_funcs { enum amdgpu_memory_partition (*query_mem_partition_mode)( struct amdgpu_device *adev); 
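amdgpu_gmc_prepare_nps_mode_change() only forwards a request when it would actually change something: the target mode must be in the supported mask and must differ from the current mode. That gate is a two-condition bit test, restated standalone:

#include <stdbool.h>

#define BIT(n) (1u << (n))

/* Mirrors amdgpu_gmc_need_nps_switch_req(): request only supported,
 * genuinely different NPS modes (req assumed to be a valid bit index). */
static bool need_nps_switch(unsigned int supported_modes, int req, int cur)
{
        return (supported_modes & BIT(req)) == BIT(req) && req != cur;
}

/* e.g. need_nps_switch(BIT(1) | BIT(4), 4, 1) is true,
 *      need_nps_switch(BIT(1) | BIT(4), 1, 1) is false. */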
+ /* Request NPS mode */ + int (*request_mem_partition_mode)(struct amdgpu_device *adev, + int nps_mode); + bool (*need_reset_on_init)(struct amdgpu_device *adev); }; struct amdgpu_xgmi_ras { @@ -182,7 +193,6 @@ struct amdgpu_xgmi { bool supported; struct ras_common_if *ras_if; bool connected_to_cpu; - bool pending_reset; struct amdgpu_xgmi_ras *ras; }; @@ -305,6 +315,9 @@ struct amdgpu_gmc { struct amdgpu_mem_partition_info *mem_partitions; uint8_t num_mem_partitions; const struct amdgpu_gmc_funcs *gmc_funcs; + enum amdgpu_memory_partition requested_nps_mode; + uint32_t supported_nps_modes; + uint32_t reset_flags; struct amdgpu_xgmi xgmi; struct amdgpu_irq_src ecc_irq; @@ -447,13 +460,17 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev); void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev); uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr); uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); -uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); int amdgpu_gmc_vram_checking(struct amdgpu_device *adev); int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev); void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev); int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, struct amdgpu_mem_partition_info *mem_ranges, - int exp_ranges); + uint8_t *exp_ranges); + +int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev, + int nps_mode); +void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev); +bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 00d6211e0fbf..f0765ccde668 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -225,15 +225,6 @@ void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c) kfree(i2c); } -/* Add the default buses */ -void amdgpu_i2c_init(struct amdgpu_device *adev) -{ - if (amdgpu_hw_i2c) - DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n"); - - amdgpu_atombios_i2c_init(adev); -} - /* remove all the buses */ void amdgpu_i2c_fini(struct amdgpu_device *adev) { @@ -247,22 +238,6 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev) } } -/* Add additional buses */ -void amdgpu_i2c_add(struct amdgpu_device *adev, - const struct amdgpu_i2c_bus_rec *rec, - const char *name) -{ - struct drm_device *dev = adev_to_drm(adev); - int i; - - for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) { - if (!adev->i2c_bus[i]) { - adev->i2c_bus[i] = amdgpu_i2c_create(dev, rec, name); - return; - } - } -} - /* looks up bus based on id */ struct amdgpu_i2c_chan * amdgpu_i2c_lookup(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h index 63c2ff7499e1..21e3d1dad0a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h @@ -28,11 +28,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, const struct amdgpu_i2c_bus_rec *rec, const char *name); void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c); -void amdgpu_i2c_init(struct amdgpu_device *adev); void amdgpu_i2c_fini(struct amdgpu_device *adev); -void amdgpu_i2c_add(struct amdgpu_device *adev, - const struct amdgpu_i2c_bus_rec *rec, - const char *name); struct amdgpu_i2c_chan * amdgpu_i2c_lookup(struct amdgpu_device *adev, const struct amdgpu_i2c_bus_rec *i2c_bus); diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 92d27d32de41..8e712a11aba5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -342,15 +342,13 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, * @ring: ring we want to submit job to * @job: job who wants to use the VMID * @id: resulting VMID - * @fence: fence to wait for if no id could be grabbed * * Try to reuse a VMID for this submission. */ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_job *job, - struct amdgpu_vmid **id, - struct dma_fence **fence) + struct amdgpu_vmid **id) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->vm_hub; @@ -429,7 +427,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (r || !id) goto error; } else { - r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence); + r = amdgpu_vmid_grab_used(vm, ring, job, &id); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c index 4766e99dd98f..263ce1811cc8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c @@ -33,33 +33,17 @@ #include "isp_v4_1_0.h" #include "isp_v4_1_1.h" -static int isp_sw_init(void *handle) -{ - return 0; -} - -static int isp_sw_fini(void *handle) -{ - return 0; -} - /** * isp_hw_init - start and test isp block * - * @handle: handle for amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int isp_hw_init(void *handle) +static int isp_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_isp *isp = &adev->isp; - const struct amdgpu_ip_block *ip_block = - amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ISP); - - if (!ip_block) - return -EINVAL; - if (isp->funcs->hw_init != NULL) return isp->funcs->hw_init(isp); @@ -69,13 +53,12 @@ static int isp_hw_init(void *handle) /** * isp_hw_fini - stop the hardware block * - * @handle: handle for amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* */ -static int isp_hw_fini(void *handle) +static int isp_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_isp *isp = &adev->isp; + struct amdgpu_isp *isp = &ip_block->adev->isp; if (isp->funcs->hw_fini != NULL) return isp->funcs->hw_fini(isp); @@ -83,16 +66,6 @@ static int isp_hw_fini(void *handle) return -ENODEV; } -static int isp_suspend(void *handle) -{ - return 0; -} - -static int isp_resume(void *handle) -{ - return 0; -} - static int isp_load_fw_by_psp(struct amdgpu_device *adev) { const struct common_firmware_header *hdr; @@ -122,9 +95,10 @@ static int isp_load_fw_by_psp(struct amdgpu_device *adev) return r; } -static int isp_early_init(void *handle) +static int isp_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_isp *isp = &adev->isp; switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) { @@ -154,16 +128,6 @@ static bool isp_is_idle(void *handle) return true; } -static int isp_wait_for_idle(void *handle) -{ - return 0; -} - -static int isp_soft_reset(void *handle) -{ - return 0; -} - static int isp_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -179,16 +143,9 @@ static int isp_set_powergating_state(void *handle, static const struct amd_ip_funcs isp_ip_funcs = { .name = "isp_ip", .early_init = isp_early_init, - .late_init = NULL, - .sw_init = isp_sw_init, - .sw_fini = isp_sw_fini, .hw_init = isp_hw_init, .hw_fini = isp_hw_fini, - .suspend = isp_suspend, - .resume = isp_resume, .is_idle = isp_is_idle, - .wait_for_idle = isp_wait_for_idle, - .soft_reset = isp_soft_reset, .set_clockgating_state = isp_set_clockgating_state, .set_powergating_state = isp_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index fffa4430784f..b9d08bc96581 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -42,7 +42,7 @@ static void amdgpu_job_do_core_dump(struct amdgpu_device *adev, for (i = 0; i < adev->num_ip_blocks; i++) if (adev->ip_blocks[i].version->funcs->dump_ip_state) adev->ip_blocks[i].version->funcs - ->dump_ip_state((void *)adev); + ->dump_ip_state((void *)&adev->ip_blocks[i]); dev_info(adev->dev, "Dumping IP State Completed\n"); amdgpu_coredump(adev, true, false, job); @@ -137,6 +137,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) /* attempt a per ring reset */ if (amdgpu_gpu_recovery && ring->funcs->reset) { + dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name); /* stop the scheduler, but don't mess with the * bad job yet because if ring reset fails * we'll fall back to full GPU reset. 
@@ -152,6 +153,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) drm_sched_start(&ring->sched, 0); goto exit; } + dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name); } if (amdgpu_device_should_recover_gpu(ring->adev)) { @@ -356,10 +358,10 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job, if (r) goto error; - if (!fence && job->gang_submit) + if (job->gang_submit) fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit); - while (!fence && job->vm && !job->vmid) { + if (!fence && job->vm && !job->vmid) { r = amdgpu_vmid_grab(job->vm, ring, job, &fence); if (r) { dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 6df99cb00d9a..b6d2eb049f54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -47,7 +47,7 @@ int amdgpu_jpeg_sw_init(struct amdgpu_device *adev) adev->jpeg.indirect_sram = true; for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { - if (adev->jpeg.harvest_config & (1 << i)) + if (adev->jpeg.harvest_config & (1U << i)) continue; if (adev->jpeg.indirect_sram) { @@ -73,7 +73,7 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev) int i, j; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) + if (adev->jpeg.harvest_config & (1U << i)) continue; amdgpu_bo_free_kernel( @@ -110,7 +110,7 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work) unsigned int i, j; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) + if (adev->jpeg.harvest_config & (1U << i)) continue; for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) @@ -342,3 +342,113 @@ int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx, return psp_execute_ip_fw_load(&adev->psp, &ucode); } + +/* + * debugfs to enable/disable jpeg job submission to a specific core.
+ */ +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i, j; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + + mask = (1ULL << (adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings)) - 1; + if ((val & mask) == 0) + return -EINVAL; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + ring = &adev->jpeg.inst[i].ring_dec[j]; + if (val & (1ULL << ((i * adev->jpeg.num_jpeg_rings) + j))) + ring->sched.ready = true; + else + ring->sched.ready = false; + } + } + /* publish the sched.ready flag update so it takes effect immediately across SMP */ + smp_rmb(); + return 0; +} + +static int amdgpu_debugfs_jpeg_sched_mask_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i, j; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + ring = &adev->jpeg.inst[i].ring_dec[j]; + if (ring->sched.ready) + mask |= 1ULL << ((i * adev->jpeg.num_jpeg_rings) + j); + } + } + *val = mask; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_jpeg_sched_mask_fops, + amdgpu_debugfs_jpeg_sched_mask_get, + amdgpu_debugfs_jpeg_sched_mask_set, "%llx\n"); + +#endif + +void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + char name[32]; + + if (!(adev->jpeg.num_jpeg_inst > 1) && !(adev->jpeg.num_jpeg_rings > 1)) + return; + sprintf(name, "amdgpu_jpeg_sched_mask"); + debugfs_create_file(name, 0600, root, adev, + &amdgpu_debugfs_jpeg_sched_mask_fops); +#endif +} + +static ssize_t amdgpu_get_jpeg_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->jpeg.supported_reset); +} + +static DEVICE_ATTR(jpeg_reset_mask, 0444, + amdgpu_get_jpeg_reset_mask, NULL); + +int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev) +{ + int r = 0; + + if (adev->jpeg.num_jpeg_inst) { + r = device_create_file(adev->dev, &dev_attr_jpeg_reset_mask); + if (r) + return r; + } + + return r; +} + +void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev) +{ + if (adev->dev->kobj.sd) { + if (adev->jpeg.num_jpeg_inst) + device_remove_file(adev->dev, &dev_attr_jpeg_reset_mask); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index f9cdd873ac9b..3eb4a4653fce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -128,6 +128,7 @@ struct amdgpu_jpeg { uint16_t inst_mask; uint8_t num_inst_per_aid; bool indirect_sram; + uint32_t supported_reset; }; int amdgpu_jpeg_sw_init(struct amdgpu_device *adev); @@ -149,5 +150,8 @@ int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev); int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx, enum AMDGPU_UCODE_ID ucode_id); +void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev); +int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev); +void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device
*adev); #endif /*__AMDGPU_JPEG_H__*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index 18ee60378727..3ca03b5e0f91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -348,6 +348,24 @@ static bool amdgpu_mca_bank_should_update(struct amdgpu_device *adev, enum amdgp return ret; } +static bool amdgpu_mca_bank_should_dump(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, + struct mca_bank_entry *entry) +{ + bool ret; + + switch (type) { + case AMDGPU_MCA_ERROR_TYPE_CE: + ret = amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]); + break; + case AMDGPU_MCA_ERROR_TYPE_UE: + default: + ret = true; + break; + } + + return ret; +} + static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set, struct ras_query_context *qctx) { @@ -373,7 +391,8 @@ static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mc amdgpu_mca_bank_set_add_entry(mca_set, &entry); - amdgpu_mca_smu_mca_bank_dump(adev, i, &entry, qctx); + if (amdgpu_mca_bank_should_dump(adev, type, &entry)) + amdgpu_mca_smu_mca_bank_dump(adev, i, &entry, qctx); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 10b61ff63802..59ec20b07a6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -104,7 +104,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev) return 0; r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_DOMAIN_VRAM, &adev->mes.event_log_gpu_obj, &adev->mes.event_log_gpu_addr, &adev->mes.event_log_cpu_addr); @@ -192,17 +192,6 @@ int amdgpu_mes_init(struct amdgpu_device *adev) (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]]; } - r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs); - if (r) { - dev_err(adev->dev, - "(%d) read_val_offs alloc failed\n", r); - goto error; - } - adev->mes.read_val_gpu_addr = - adev->wb.gpu_addr + (adev->mes.read_val_offs * 4); - adev->mes.read_val_ptr = - (uint32_t *)&adev->wb.wb[adev->mes.read_val_offs]; - r = amdgpu_mes_doorbell_init(adev); if (r) goto error; @@ -223,8 +212,6 @@ error: amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs[i]); } - if (adev->mes.read_val_ptr) - amdgpu_device_wb_free(adev, adev->mes.read_val_offs); idr_destroy(&adev->mes.pasid_idr); idr_destroy(&adev->mes.gang_id_idr); @@ -249,8 +236,6 @@ void amdgpu_mes_fini(struct amdgpu_device *adev) amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs[i]); } - if (adev->mes.read_val_ptr) - amdgpu_device_wb_free(adev, adev->mes.read_val_offs); amdgpu_mes_doorbell_free(adev); @@ -905,7 +890,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, queue_input.me_id = ring->me; queue_input.pipe_id = ring->pipe; queue_input.queue_id = ring->queue; - queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); + queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0; queue_input.wptr_addr = ring->wptr_gpu_addr; queue_input.vmid = vmid; queue_input.use_mmio = use_mmio; @@ -921,10 +906,19 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg) { struct mes_misc_op_input op_input; int r, val = 0; + uint32_t addr_offset = 0; + uint64_t read_val_gpu_addr; + uint32_t *read_val_ptr; + if (amdgpu_device_wb_get(adev, &addr_offset)) { + DRM_ERROR("critical bug! 
too many mes readers\n"); + goto error; + } + read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4); + read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset]; op_input.op = MES_MISC_OP_READ_REG; op_input.read_reg.reg_offset = reg; - op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr; + op_input.read_reg.buffer_addr = read_val_gpu_addr; if (!adev->mes.funcs->misc_op) { DRM_ERROR("mes rreg is not supported!\n"); @@ -935,9 +929,11 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg) if (r) DRM_ERROR("failed to read reg (0x%x)\n", reg); else - val = *(adev->mes.read_val_ptr); + val = *(read_val_ptr); error: + if (addr_offset) + amdgpu_device_wb_free(adev, addr_offset); return val; } @@ -1203,8 +1199,10 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id, r = amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); - if (r) + if (r) { + amdgpu_mes_unlock(&adev->mes); goto clean_up_memory; + } amdgpu_mes_ring_to_queue_props(adev, ring, &qprops); @@ -1237,7 +1235,6 @@ clean_up_ring: amdgpu_ring_fini(ring); clean_up_memory: kfree(ring); - amdgpu_mes_unlock(&adev->mes); return r; } @@ -1593,6 +1590,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) char ucode_prefix[30]; char fw_name[50]; bool need_retry = false; + u32 *ucode_ptr; int r; amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, @@ -1630,6 +1628,10 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) adev->mes.data_start_addr[pipe] = le32_to_cpu(mes_hdr->mes_data_start_addr_lo) | ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32); + ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data + + sizeof(union amdgpu_firmware_header)); + adev->mes.fw_version[pipe] = + le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK; if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { int ucode, ucode_data; @@ -1676,6 +1678,29 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev) return is_supported; } +/* Fix me -- node_id is used to identify the correct MES instances in the future */ +int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable) +{ + struct mes_misc_op_input op_input = {0}; + int r; + + op_input.op = MES_MISC_OP_CHANGE_CONFIG; + op_input.change_config.option.limit_single_process = enable ? 
1 : 0; + + if (!adev->mes.funcs->misc_op) { + dev_err(adev->dev, "mes change config is not supported!\n"); + r = -EINVAL; + goto error; + } + + r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + if (r) + dev_err(adev->dev, "failed to change_config.\n"); + +error: + return r; +} + #if defined(CONFIG_DEBUG_FS) static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 96788c0f42f1..c6f93cbd6739 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -40,6 +40,7 @@ #define AMDGPU_MES_VERSION_MASK 0x00000fff #define AMDGPU_MES_API_VERSION_MASK 0x00fff000 #define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000 +#define AMDGPU_MES_MSCRATCH_SIZE 0x8000 enum amdgpu_mes_priority_level { AMDGPU_MES_PRIORITY_LEVEL_LOW = 0, @@ -75,6 +76,7 @@ struct amdgpu_mes { uint32_t sched_version; uint32_t kiq_version; + uint32_t fw_version[AMDGPU_MAX_MES_PIPES]; bool enable_legacy_queue_map; uint32_t total_max_queue; @@ -119,9 +121,6 @@ struct amdgpu_mes { uint32_t query_status_fence_offs[AMDGPU_MAX_MES_PIPES]; uint64_t query_status_fence_gpu_addr[AMDGPU_MAX_MES_PIPES]; uint64_t *query_status_fence_ptr[AMDGPU_MAX_MES_PIPES]; - uint32_t read_val_offs; - uint64_t read_val_gpu_addr; - uint32_t *read_val_ptr; uint32_t saved_flags; @@ -310,6 +309,7 @@ enum mes_misc_opcode { MES_MISC_OP_WRM_REG_WAIT, MES_MISC_OP_WRM_REG_WR_WAIT, MES_MISC_OP_SET_SHADER_DEBUGGER, + MES_MISC_OP_CHANGE_CONFIG, }; struct mes_misc_op_input { @@ -348,6 +348,21 @@ struct mes_misc_op_input { uint32_t tcp_watch_cntl[4]; uint32_t trap_en; } set_shader_debugger; + + struct { + union { + struct { + uint32_t limit_single_process : 1; + uint32_t enable_hws_logging_buffer : 1; + uint32_t reserved : 30; + }; + uint32_t all; + } option; + struct { + uint32_t tdr_level; + uint32_t tdr_delay; + } tdr_config; + } change_config; }; }; @@ -518,4 +533,7 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes) } bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev); + +int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable); + #endif /* __AMDGPU_MES_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index f61d117b0caf..79c2f807b9fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -101,6 +101,7 @@ struct amdgpu_nbio_funcs { int (*get_compute_partition_mode)(struct amdgpu_device *adev); u32 (*get_memory_partition_mode)(struct amdgpu_device *adev, u32 *supp_modes); + bool (*is_nps_switch_requested)(struct amdgpu_device *adev); u64 (*get_pcie_replay_count)(struct amdgpu_device *adev); void (*set_reg_remap)(struct amdgpu_device *adev); }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 44819cdba7fb..6852d50caa89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -40,6 +40,7 @@ #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" #include "amdgpu_vram_mgr.h" +#include "amdgpu_vm.h" /** * DOC: amdgpu_object @@ -161,7 +162,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) * When GTT is just an alternative to VRAM make sure that we * only use it as fallback and still try to fill up VRAM first. 
*/ - if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) + if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM && + !(adev->flags & AMD_IS_APU)) places[c].flags |= TTM_PL_FLAG_FALLBACK; c++; } @@ -1170,54 +1172,71 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, } void amdgpu_bo_get_memory(struct amdgpu_bo *bo, - struct amdgpu_mem_stats *stats) + struct amdgpu_mem_stats *stats, + unsigned int sz) { + const unsigned int domain_to_pl[] = { + [ilog2(AMDGPU_GEM_DOMAIN_CPU)] = TTM_PL_SYSTEM, + [ilog2(AMDGPU_GEM_DOMAIN_GTT)] = TTM_PL_TT, + [ilog2(AMDGPU_GEM_DOMAIN_VRAM)] = TTM_PL_VRAM, + [ilog2(AMDGPU_GEM_DOMAIN_GDS)] = AMDGPU_PL_GDS, + [ilog2(AMDGPU_GEM_DOMAIN_GWS)] = AMDGPU_PL_GWS, + [ilog2(AMDGPU_GEM_DOMAIN_OA)] = AMDGPU_PL_OA, + [ilog2(AMDGPU_GEM_DOMAIN_DOORBELL)] = AMDGPU_PL_DOORBELL, + }; struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct ttm_resource *res = bo->tbo.resource; + struct drm_gem_object *obj = &bo->tbo.base; uint64_t size = amdgpu_bo_size(bo); - struct drm_gem_object *obj; - bool shared; + unsigned int type; + + if (!res) { + /* + * If no backing store use one of the preferred domain for basic + * stats. We take the MSB since that should give a reasonable + * view. + */ + BUILD_BUG_ON(TTM_PL_VRAM < TTM_PL_TT || + TTM_PL_VRAM < TTM_PL_SYSTEM); + type = fls(bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK); + if (!type) + return; + type--; + if (drm_WARN_ON_ONCE(&adev->ddev, + type >= ARRAY_SIZE(domain_to_pl))) + return; + type = domain_to_pl[type]; + } else { + type = res->mem_type; + } - /* Abort if the BO doesn't currently have a backing store */ - if (!res) + if (drm_WARN_ON_ONCE(&adev->ddev, type >= sz)) return; - obj = &bo->tbo.base; - shared = drm_gem_object_is_shared_for_memory_stats(obj); - - switch (res->mem_type) { - case TTM_PL_VRAM: - stats->vram += size; - if (amdgpu_res_cpu_visible(adev, res)) - stats->visible_vram += size; - if (shared) - stats->vram_shared += size; - break; - case TTM_PL_TT: - stats->gtt += size; - if (shared) - stats->gtt_shared += size; - break; - case TTM_PL_SYSTEM: - default: - stats->cpu += size; - if (shared) - stats->cpu_shared += size; - break; + /* DRM stats common fields: */ + + if (drm_gem_object_is_shared_for_memory_stats(obj)) + stats[type].drm.shared += size; + else + stats[type].drm.private += size; + + if (res) { + stats[type].drm.resident += size; + + if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP)) + stats[type].drm.active += size; + else if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) + stats[type].drm.purgeable += size; } + /* amdgpu specific stats: */ + if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) { - stats->requested_vram += size; - if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) - stats->requested_visible_vram += size; - - if (res->mem_type != TTM_PL_VRAM) { - stats->evicted_vram += size; - if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) - stats->evicted_visible_vram += size; - } + stats[TTM_PL_VRAM].requested += size; + if (type != TTM_PL_VRAM) + stats[TTM_PL_VRAM].evicted += size; } else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) { - stats->requested_gtt += size; + stats[TTM_PL_TT].requested += size; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 717e47b46167..be6769852ece 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -139,33 +139,6 @@ struct amdgpu_bo_vm { struct amdgpu_vm_bo_base entries[]; }; -struct 
amdgpu_mem_stats { - /* current VRAM usage, includes visible VRAM */ - uint64_t vram; - /* current shared VRAM usage, includes visible VRAM */ - uint64_t vram_shared; - /* current visible VRAM usage */ - uint64_t visible_vram; - /* current GTT usage */ - uint64_t gtt; - /* current shared GTT usage */ - uint64_t gtt_shared; - /* current system memory usage */ - uint64_t cpu; - /* current shared system memory usage */ - uint64_t cpu_shared; - /* sum of evicted buffers, includes visible VRAM */ - uint64_t evicted_vram; - /* sum of evicted buffers due to CPU access */ - uint64_t evicted_visible_vram; - /* how much userspace asked for, includes vis.VRAM */ - uint64_t requested_vram; - /* how much userspace asked for */ - uint64_t requested_visible_vram; - /* how much userspace asked for */ - uint64_t requested_gtt; -}; - static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo) { return container_of(tbo, struct amdgpu_bo, tbo); @@ -328,7 +301,8 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo); void amdgpu_bo_get_memory(struct amdgpu_bo *bo, - struct amdgpu_mem_stats *stats); + struct amdgpu_mem_stats *stats, + unsigned int size); uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, uint32_t domain); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c index e8adfd0a570a..34b5e22b44e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c @@ -137,7 +137,8 @@ void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev) if (ret) return; - device_remove_file(adev->dev, &dev_attr_mem_info_preempt_used); + if (adev->dev->kobj.sd) + device_remove_file(adev->dev, &dev_attr_mem_info_preempt_used); ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 0b28b2cf1517..448f9e742983 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -159,9 +159,9 @@ static int psp_init_sriov_microcode(struct psp_context *psp) return ret; } -static int psp_early_init(void *handle) +static int psp_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; psp->autoload_supported = true; @@ -421,9 +421,9 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, return ret; } -static int psp_sw_init(void *handle) +static int psp_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; int ret; struct psp_runtime_boot_cfg_entry boot_cfg_entry; @@ -527,9 +527,9 @@ failed1: return ret; } -static int psp_sw_fini(void *handle) +static int psp_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; struct psp_gfx_cmd_resp *cmd = psp->cmd; @@ -639,6 +639,8 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id) return "AUTOLOAD_RLC"; case GFX_CMD_ID_BOOT_CFG: return "BOOT_CFG"; + case GFX_CMD_ID_CONFIG_SQ_PERFMON: + return "CONFIG_SQ_PERFMON"; 
default: return "UNKNOWN CMD"; } @@ -1043,6 +1045,31 @@ static int psp_rl_load(struct amdgpu_device *adev) return ret; } +int psp_memory_partition(struct psp_context *psp, int mode) +{ + struct psp_gfx_cmd_resp *cmd; + int ret; + + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = acquire_psp_cmd_buf(psp); + + cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE; + cmd->cmd.cmd_memory_part.mode = mode; + + dev_info(psp->adev->dev, + "Requesting %d memory partition change through PSP", mode); + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (ret) + dev_err(psp->adev->dev, + "PSP request failed to change to NPS%d mode\n", mode); + + release_psp_cmd_buf(psp); + + return ret; +} + int psp_spatial_partition(struct psp_context *psp, int mode) { struct psp_gfx_cmd_resp *cmd; @@ -1807,6 +1834,9 @@ int psp_ras_initialize(struct psp_context *psp) ras_cmd->ras_in_message.init_flags.xcc_mask = adev->gfx.xcc_mask; ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2; + if (adev->gmc.gmc_funcs->query_mem_partition_mode) + ras_cmd->ras_in_message.init_flags.nps_mode = + adev->gmc.gmc_funcs->query_mem_partition_mode(adev); ret = psp_ta_load(psp, &psp->ras_context.context); @@ -2264,6 +2294,19 @@ bool amdgpu_psp_get_ras_capability(struct psp_context *psp) } } +bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev) +{ + struct psp_context *psp = &adev->psp; + + if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) + return false; + + if (psp->funcs && psp->funcs->is_reload_needed) + return psp->funcs->is_reload_needed(psp); + + return false; +} + static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -2958,10 +3001,10 @@ failed: return ret; } -static int psp_hw_init(void *handle) +static int psp_hw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; mutex_lock(&adev->firmware.mutex); /* @@ -2987,9 +3030,9 @@ failed: return -EINVAL; } -static int psp_hw_fini(void *handle) +static int psp_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; if (psp->ta_fw) { @@ -3011,10 +3054,10 @@ static int psp_hw_fini(void *handle) return 0; } -static int psp_suspend(void *handle) +static int psp_suspend(struct amdgpu_ip_block *ip_block) { int ret = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; if (adev->gmc.xgmi.num_physical_nodes > 1 && @@ -3074,10 +3117,10 @@ out: return ret; } -static int psp_resume(void *handle) +static int psp_resume(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; dev_info(adev->dev, "PSP is resuming...\n"); @@ -3523,6 +3566,36 @@ out: return err; } +static bool is_ta_fw_applicable(struct psp_context *psp, + const struct psp_fw_bin_desc *desc) +{ + struct amdgpu_device *adev = psp->adev; + uint32_t fw_version; + + switch (desc->fw_type) { + case TA_FW_TYPE_PSP_XGMI: + case TA_FW_TYPE_PSP_XGMI_AUX: + /* for now, AUX TA only exists on 13.0.6 ta bin, + * from v20.00.0x.14 + */ + if (amdgpu_ip_version(adev, MP0_HWIP, 0) == + IP_VERSION(13, 0, 6)) { + fw_version = le32_to_cpu(desc->fw_version); + + if (adev->flags & 
AMD_IS_APU && + (fw_version & 0xff) >= 0x14) + return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX; + else + return desc->fw_type == TA_FW_TYPE_PSP_XGMI; + } + break; + default: + break; + } + + return true; +} + static int parse_ta_bin_descriptor(struct psp_context *psp, const struct psp_fw_bin_desc *desc, const struct ta_firmware_header_v2_0 *ta_hdr) @@ -3532,6 +3605,9 @@ static int parse_ta_bin_descriptor(struct psp_context *psp, if (!psp || !desc || !ta_hdr) return -EINVAL; + if (!is_ta_fw_applicable(psp, desc)) + return 0; + ucode_start_addr = (uint8_t *)ta_hdr + le32_to_cpu(desc->offset_bytes) + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); @@ -3544,6 +3620,7 @@ static int parse_ta_bin_descriptor(struct psp_context *psp, psp->asd_context.bin_desc.start_addr = ucode_start_addr; break; case TA_FW_TYPE_PSP_XGMI: + case TA_FW_TYPE_PSP_XGMI_AUX: psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; @@ -3736,8 +3813,44 @@ out: return err; } +int psp_config_sq_perfmon(struct psp_context *psp, + uint32_t xcp_id, bool core_override_enable, + bool reg_override_enable, bool perfmon_override_enable) +{ + int ret; + + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (xcp_id > MAX_XCP) { + dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id); + return -EINVAL; + } + + if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) { + dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n", + amdgpu_ip_version(psp->adev, MP0_HWIP, 0)); + return -EINVAL; + } + struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); + + cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON; + cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id); + cmd->cmd.config_sq_perfmon.core_override = core_override_enable; + cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable; + cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable; + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (ret) + dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n", + xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable); + + release_psp_cmd_buf(psp); + return ret; +} + static int psp_set_clockgating_state(void *handle, - enum amd_clockgating_state state) + enum amd_clockgating_state state) { return 0; } @@ -3999,7 +4112,7 @@ static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribu } static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj, - struct bin_attribute *attr, + const struct bin_attribute *attr, int idx) { struct device *dev = kobj_to_dev(kobj); @@ -4019,17 +4132,12 @@ const struct attribute_group amdgpu_flash_attr_group = { const struct amd_ip_funcs psp_ip_funcs = { .name = "psp", .early_init = psp_early_init, - .late_init = NULL, .sw_init = psp_sw_init, .sw_fini = psp_sw_fini, .hw_init = psp_hw_init, .hw_fini = psp_hw_fini, .suspend = psp_suspend, .resume = psp_resume, - .is_idle = NULL, - .check_soft_reset = NULL, - .wait_for_idle = NULL, - .soft_reset = NULL, .set_clockgating_state = psp_set_clockgating_state, .set_powergating_state = psp_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index e8abbbcb4326..567cb1f924ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ 
-139,6 +139,7 @@ struct psp_funcs { int (*fatal_error_recovery_quirk)(struct psp_context *psp); bool (*get_ras_capability)(struct psp_context *psp); bool (*is_aux_sos_load_required)(struct psp_context *psp); + bool (*is_reload_needed)(struct psp_context *psp); }; struct ta_funcs { @@ -552,9 +553,15 @@ int psp_load_fw_list(struct psp_context *psp, void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size); int psp_spatial_partition(struct psp_context *psp, int mode); +int psp_memory_partition(struct psp_context *psp, int mode); int is_psp_fw_valid(struct psp_bin_desc bin); int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev); bool amdgpu_psp_get_ras_capability(struct psp_context *psp); + +int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id, + bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable); +bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 1a1395c5fff1..4c9fa24dd972 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1214,6 +1214,42 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, } } +static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev, + struct ras_query_if *query_if, + struct ras_err_data *err_data, + struct ras_query_context *qctx) +{ + unsigned long new_ue, new_ce, new_de; + struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head); + const char *blk_name = get_ras_block_str(&query_if->head); + u64 event_id = qctx->evid.event_id; + + new_ce = err_data->ce_count - obj->err_data.ce_count; + new_ue = err_data->ue_count - obj->err_data.ue_count; + new_de = err_data->de_count - obj->err_data.de_count; + + if (new_ce) { + RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors " + "detected in %s block\n", + new_ce, + blk_name); + } + + if (new_ue) { + RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors " + "detected in %s block\n", + new_ue, + blk_name); + } + + if (new_de) { + RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors " + "detected in %s block\n", + new_de, + blk_name); + } +} + static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data) { struct ras_err_node *err_node; @@ -1237,6 +1273,15 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s } } +static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj, + struct ras_err_data *err_data) +{ + /* Host reports absolute counts */ + obj->err_data.ue_count = err_data->ue_count; + obj->err_data.ce_count = err_data->ce_count; + obj->err_data.de_count = err_data->de_count; +} + static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk) { struct ras_common_if head; @@ -1253,7 +1298,7 @@ int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk, struct ras_manager *obj; /* in resume phase, no need to create aca fs node */ - if (adev->in_suspend || amdgpu_in_reset(adev)) + if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) return 0; obj = get_ras_manager(adev, blk); @@ -1323,7 +1368,9 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY) return -EINVAL; - if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { + if (error_query_mode == 
AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) { + return amdgpu_virt_req_ras_err_count(adev, blk, err_data); + } else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { if (info->head.block == AMDGPU_RAS_BLOCK__UMC) { amdgpu_ras_get_ecc_info(adev, err_data); } else { @@ -1405,14 +1452,22 @@ static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev, if (ret) goto out_fini_err_data; - amdgpu_rasmgr_error_data_statistic_update(obj, &err_data); + if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) { + amdgpu_rasmgr_error_data_statistic_update(obj, &err_data); + amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx); + } else { + /* Host provides absolute error counts. First generate the report + * using the previous VF internal count against new host count. + * Then Update VF internal count. + */ + amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx); + amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data); + } info->ue_count = obj->err_data.ue_count; info->ce_count = obj->err_data.ce_count; info->de_count = obj->err_data.de_count; - amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx); - out_fini_err_data: amdgpu_ras_error_data_fini(&err_data); @@ -2605,6 +2660,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; reset_context.src = AMDGPU_RESET_SRC_RAS; + set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); /* Perform full reset in fatal error mode */ if (!amdgpu_ras_is_poison_mode_supported(ras->adev)) @@ -3146,7 +3202,42 @@ static int amdgpu_ras_page_retirement_thread(void *param) return 0; } -int amdgpu_ras_recovery_init(struct amdgpu_device *adev) +int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int ret; + + if (!con || amdgpu_sriov_vf(adev)) + return 0; + + ret = amdgpu_ras_eeprom_init(&con->eeprom_control); + + if (ret) + return ret; + + /* HW not usable */ + if (amdgpu_ras_is_rma(adev)) + return -EHWPOISON; + + if (con->eeprom_control.ras_num_recs) { + ret = amdgpu_ras_load_bad_pages(adev); + if (ret) + return ret; + + amdgpu_dpm_send_hbm_bad_pages_num( + adev, con->eeprom_control.ras_num_recs); + + if (con->update_channel_flag == true) { + amdgpu_dpm_send_hbm_bad_channel_flag( + adev, con->eeprom_control.bad_channel_bitmap); + con->update_channel_flag = false; + } + } + + return ret; +} + +int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data **data; @@ -3181,31 +3272,10 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); amdgpu_ras_validate_threshold(adev, max_eeprom_records_count); - /* Todo: During test the SMU might fail to read the eeprom through I2C - * when the GPU is pending on XGMI reset during probe time - * (Mostly after second bus reset), skip it now - */ - if (adev->gmc.xgmi.pending_reset) - return 0; - ret = amdgpu_ras_eeprom_init(&con->eeprom_control); - /* - * This calling fails when is_rma is true or - * ret != 0. 
- */ - if (amdgpu_ras_is_rma(adev) || ret) - goto free; - - if (con->eeprom_control.ras_num_recs) { - ret = amdgpu_ras_load_bad_pages(adev); + if (init_bp_info) { + ret = amdgpu_ras_init_badpage_info(adev); if (ret) goto free; - - amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs); - - if (con->update_channel_flag == true) { - amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap); - con->update_channel_flag = false; - } } mutex_init(&con->page_rsv_lock); @@ -3438,6 +3508,11 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) if (!amdgpu_ras_asic_supported(adev)) return; + if (amdgpu_sriov_vf(adev)) { + if (amdgpu_virt_get_ras_capability(adev)) + goto init_ras_enabled_flag; + } + /* query ras capability from psp */ if (amdgpu_psp_get_ras_capability(&adev->psp)) goto init_ras_enabled_flag; @@ -3535,7 +3610,7 @@ static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev) ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr; /* init event manager with node 0 on xgmi system */ - if (!amdgpu_in_reset(adev)) { + if (!amdgpu_reset_in_recovery(adev)) { if (!hive || adev->gmc.xgmi.node_id == 0) ras_event_mgr_init(ras->event_mgr); } @@ -3750,7 +3825,7 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev, r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); if (r) { - if (adev->in_suspend || amdgpu_in_reset(adev)) { + if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) { /* in resume phase, if fail to enable ras, * clean up all ras fs nodes, and disable ras */ goto cleanup; @@ -3762,7 +3837,7 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev, amdgpu_persistent_edc_harvesting(adev, ras_block); /* in resume phase, no need to create ras fs node */ - if (adev->in_suspend || amdgpu_in_reset(adev)) + if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) return 0; ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm); @@ -3892,7 +3967,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev) amdgpu_ras_event_mgr_init(adev); if (amdgpu_ras_aca_is_supported(adev)) { - if (amdgpu_in_reset(adev)) { + if (amdgpu_reset_in_recovery(adev)) { if (amdgpu_aca_is_enabled(adev)) r = amdgpu_aca_reset(adev); else @@ -3910,7 +3985,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev) } /* Guest side doesn't need init ras feature */ - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev)) return 0; list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { @@ -4294,8 +4369,27 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; } - if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) + if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) { + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); + int hive_ras_recovery = 0; + + if (hive) { + hive_ras_recovery = atomic_read(&hive->ras_recovery); + amdgpu_put_xgmi_hive(hive); + } + /* In the case of multiple GPUs, after a GPU has started + * resetting all GPUs on hive, other GPUs do not need to + * trigger GPU reset again. 
+ */ + if (!hive_ras_recovery) + amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work); + else + atomic_set(&ras->in_recovery, 0); + } else { + flush_work(&ras->recovery_work); amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work); + } + return 0; } @@ -4358,11 +4452,14 @@ bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev, return false; } - if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) + if (amdgpu_sriov_vf(adev)) { + *error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY; + } else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) { *error_query_mode = (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; - else + } else { *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY; + } return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 669720a9c60a..6db772ecfee4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -365,6 +365,7 @@ enum amdgpu_ras_error_query_mode { AMDGPU_RAS_INVALID_ERROR_QUERY = 0, AMDGPU_RAS_DIRECT_ERROR_QUERY = 1, AMDGPU_RAS_FIRMWARE_ERROR_QUERY = 2, + AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY = 3, }; /* ras error status register fields */ @@ -736,8 +737,8 @@ struct amdgpu_ras_block_hw_ops { * 8: feature disable */ - -int amdgpu_ras_recovery_init(struct amdgpu_device *adev); +int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev); +int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info); void amdgpu_ras_resume(struct amdgpu_device *adev); void amdgpu_ras_suspend(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index 66c1a868c0e1..a0acb65f4b40 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -26,6 +26,156 @@ #include "sienna_cichlid.h" #include "smu_v13_0_10.h" +static int amdgpu_reset_xgmi_reset_on_init_suspend(struct amdgpu_device *adev) +{ + int i; + + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { + if (!adev->ip_blocks[i].status.valid) + continue; + if (!adev->ip_blocks[i].status.hw) + continue; + /* displays are handled in phase1 */ + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) + continue; + + /* XXX handle errors */ + amdgpu_ip_block_suspend(&adev->ip_blocks[i]); + adev->ip_blocks[i].status.hw = false; + } + + /* VCN FW shared region is in framebuffer; there are some flags + * initialized in that region during sw_init. Make sure the region is + * backed up.
+ */ + amdgpu_vcn_save_vcpu_bo(adev); + + return 0; +} + +static int amdgpu_reset_xgmi_reset_on_init_prep_hwctxt( + struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + struct list_head *reset_device_list = reset_context->reset_device_list; + struct amdgpu_device *tmp_adev; + int r = 0; + + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + amdgpu_unregister_gpu_instance(tmp_adev); + r = amdgpu_reset_xgmi_reset_on_init_suspend(tmp_adev); + if (r) { + dev_err(tmp_adev->dev, + "xgmi reset on init: prepare for reset failed"); + return r; + } + } + + return r; +} + +static int amdgpu_reset_xgmi_reset_on_init_restore_hwctxt( + struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + struct list_head *reset_device_list = reset_context->reset_device_list; + struct amdgpu_device *tmp_adev = NULL; + int r; + + r = amdgpu_device_reinit_after_reset(reset_context); + if (r) + return r; + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + if (!tmp_adev->kfd.init_complete) { + kgd2kfd_init_zone_device(tmp_adev); + amdgpu_amdkfd_device_init(tmp_adev); + amdgpu_amdkfd_drm_client_create(tmp_adev); + } + } + + return r; +} + +static int amdgpu_reset_xgmi_reset_on_init_perform_reset( + struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + struct list_head *reset_device_list = reset_context->reset_device_list; + struct amdgpu_device *tmp_adev = NULL; + int r; + + dev_dbg(adev->dev, "xgmi roi - hw reset\n"); + + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + mutex_lock(&tmp_adev->reset_cntl->reset_lock); + tmp_adev->reset_cntl->active_reset = + amdgpu_asic_reset_method(adev); + } + r = 0; + /* Mode1 reset needs to be triggered on all devices together */ + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + /* For XGMI run all resets in parallel to speed up the process */ + if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) + r = -EALREADY; + if (r) { + dev_err(tmp_adev->dev, + "xgmi reset on init: reset failed with error, %d", + r); + break; + } + } + + /* For XGMI wait for all resets to complete before proceeding */ + if (!r) { + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + flush_work(&tmp_adev->xgmi_reset_work); + r = tmp_adev->asic_reset_res; + if (r) + break; + } + } + + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + mutex_unlock(&tmp_adev->reset_cntl->reset_lock); + tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE; + } + + return r; +} + +int amdgpu_reset_do_xgmi_reset_on_init( + struct amdgpu_reset_context *reset_context) +{ + struct list_head *reset_device_list = reset_context->reset_device_list; + struct amdgpu_device *adev; + int r; + + if (!reset_device_list || list_empty(reset_device_list) || + list_is_singular(reset_device_list)) + return -EINVAL; + + adev = list_first_entry(reset_device_list, struct amdgpu_device, + reset_list); + r = amdgpu_reset_prepare_hwcontext(adev, reset_context); + if (r) + return r; + + r = amdgpu_reset_perform_reset(adev, reset_context); + + return r; +} + +struct amdgpu_reset_handler xgmi_reset_on_init_handler = { + .reset_method = AMD_RESET_METHOD_ON_INIT, + .prepare_env = NULL, + .prepare_hwcontext = amdgpu_reset_xgmi_reset_on_init_prep_hwctxt, + .perform_reset = amdgpu_reset_xgmi_reset_on_init_perform_reset, + .restore_hwcontext = amdgpu_reset_xgmi_reset_on_init_restore_hwctxt,
+ .restore_env = NULL, + .do_reset = NULL, +}; + int amdgpu_reset_init(struct amdgpu_device *adev) { int ret = 0; @@ -192,3 +342,8 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf, strscpy(buf, "unknown", len); } } + +bool amdgpu_reset_in_recovery(struct amdgpu_device *adev) +{ + return (adev->init_lvl->level == AMDGPU_INIT_LEVEL_RESET_RECOVERY); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index 1cb920abc2fe..4d9b9701139b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -153,4 +153,11 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf, for (i = 0; (i < AMDGPU_RESET_MAX_HANDLERS) && \ (handler = (*reset_ctl->reset_handlers)[i]); \ ++i) + +extern struct amdgpu_reset_handler xgmi_reset_on_init_handler; +int amdgpu_reset_do_xgmi_reset_on_init( + struct amdgpu_reset_context *reset_context); + +bool amdgpu_reset_in_recovery(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 690976665cf6..a6e28fe3f8d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -108,10 +108,22 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw) */ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) { - int i; + uint32_t occupied, chunk1, chunk2; - for (i = 0; i < count; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + occupied = ring->wptr & ring->buf_mask; + chunk1 = ring->buf_mask + 1 - occupied; + chunk1 = (chunk1 >= count) ? count : chunk1; + chunk2 = count - chunk1; + + if (chunk1) + memset32(&ring->ring[occupied], ring->funcs->nop, chunk1); + + if (chunk2) + memset32(ring->ring, ring->funcs->nop, chunk2); + + ring->wptr += count; + ring->wptr &= ring->ptr_mask; + ring->count_dw -= count; } /** @@ -141,6 +153,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) { uint32_t count; + if (ring->count_dw < 0) + DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); + /* We pad to match fetch size */ count = ring->funcs->align_mask + 1 - (ring->wptr & ring->funcs->align_mask); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index f93f51002201..36fc9578c53c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -246,7 +246,7 @@ struct amdgpu_ring { struct drm_gpu_scheduler sched; struct amdgpu_bo *ring_obj; - volatile uint32_t *ring; + uint32_t *ring; unsigned rptr_offs; u64 rptr_gpu_addr; volatile u32 *rptr_cpu_addr; @@ -288,7 +288,7 @@ struct amdgpu_ring { u64 cond_exe_gpu_addr; volatile u32 *cond_exe_cpu_addr; unsigned int set_q_mode_offs; - volatile u32 *set_q_mode_ptr; + u32 *set_q_mode_ptr; u64 set_q_mode_token; unsigned vm_hub; unsigned vm_inv_eng; @@ -377,8 +377,6 @@ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) { - if (ring->count_dw <= 0) - DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); ring->ring[ring->wptr++ & ring->buf_mask] = v; ring->wptr &= ring->ptr_mask; ring->count_dw--; @@ -388,13 +386,8 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *src, int count_dw) { unsigned occupied, chunk1, chunk2; - void *dst; - - if (unlikely(ring->count_dw < count_dw)) - DRM_ERROR("amdgpu: writing more dwords to the 
ring than expected!\n"); occupied = ring->wptr & ring->buf_mask; - dst = (void *)&ring->ring[occupied]; chunk1 = ring->buf_mask + 1 - occupied; chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1; chunk2 = count_dw - chunk1; @@ -402,12 +395,11 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, chunk2 <<= 2; if (chunk1) - memcpy(dst, src, chunk1); + memcpy(&ring->ring[occupied], src, chunk1); if (chunk2) { src += chunk1; - dst = (void *)ring->ring; - memcpy(dst, src, chunk2); + memcpy(ring->ring, src, chunk2); } ring->wptr += count_dw; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index b0a8abc7a8ec..341beec59537 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -35,21 +35,19 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, int fd, int32_t priority) { - struct fd f = fdget(fd); + CLASS(fd, f)(fd); struct amdgpu_fpriv *fpriv; struct amdgpu_ctx_mgr *mgr; struct amdgpu_ctx *ctx; uint32_t id; int r; - if (!fd_file(f)) + if (fd_empty(f)) return -EINVAL; r = amdgpu_file_to_fpriv(fd_file(f), &fpriv); - if (r) { - fdput(f); + if (r) return r; - } mgr = &fpriv->ctx_mgr; mutex_lock(&mgr->lock); @@ -57,7 +55,6 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, amdgpu_ctx_priority_override(ctx, priority); mutex_unlock(&mgr->lock); - fdput(f); return 0; } @@ -66,31 +63,25 @@ static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev, unsigned ctx_id, int32_t priority) { - struct fd f = fdget(fd); + CLASS(fd, f)(fd); struct amdgpu_fpriv *fpriv; struct amdgpu_ctx *ctx; int r; - if (!fd_file(f)) + if (fd_empty(f)) return -EINVAL; r = amdgpu_file_to_fpriv(fd_file(f), &fpriv); - if (r) { - fdput(f); + if (r) return r; - } ctx = amdgpu_ctx_get(fpriv, ctx_id); - if (!ctx) { - fdput(f); + if (!ctx) return -EINVAL; - } amdgpu_ctx_priority_override(ctx, priority); amdgpu_ctx_put(ctx); - fdput(f); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 183a976ba29d..113f0d242618 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -343,3 +343,116 @@ int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev) return 0; } + +/* + * debugfs to enable/disable sdma job submission to a specific core.
+ */ +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + + mask = (1ULL << adev->sdma.num_instances) - 1; + if ((val & mask) == 0) + return -EINVAL; + + for (i = 0; i < adev->sdma.num_instances; ++i) { + ring = &adev->sdma.instance[i].ring; + if (val & (1ULL << i)) + ring->sched.ready = true; + else + ring->sched.ready = false; + } + /* publish the sched.ready flag update so it takes effect immediately across SMP */ + smp_rmb(); + return 0; +} + +static int amdgpu_debugfs_sdma_sched_mask_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + for (i = 0; i < adev->sdma.num_instances; ++i) { + ring = &adev->sdma.instance[i].ring; + if (ring->sched.ready) + mask |= 1ULL << i; + } + + *val = mask; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_sdma_sched_mask_fops, + amdgpu_debugfs_sdma_sched_mask_get, + amdgpu_debugfs_sdma_sched_mask_set, "%llx\n"); + +#endif + +void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + char name[32]; + + if (!(adev->sdma.num_instances > 1)) + return; + sprintf(name, "amdgpu_sdma_sched_mask"); + debugfs_create_file(name, 0600, root, adev, + &amdgpu_debugfs_sdma_sched_mask_fops); +#endif +} + +static ssize_t amdgpu_get_sdma_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->sdma.supported_reset); +} + +static DEVICE_ATTR(sdma_reset_mask, 0444, + amdgpu_get_sdma_reset_mask, NULL); + +int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev) +{ + int r = 0; + + if (!amdgpu_gpu_recovery) + return r; + + if (adev->sdma.num_instances) { + r = device_create_file(adev->dev, &dev_attr_sdma_reset_mask); + if (r) + return r; + } + + return r; +} + +void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev) +{ + if (!amdgpu_gpu_recovery) + return; + + if (adev->dev->kobj.sd) { + if (adev->sdma.num_instances) + device_remove_file(adev->dev, &dev_attr_sdma_reset_mask); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 087ce0f6fa07..2db58b5812a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -116,6 +116,7 @@ struct amdgpu_sdma { struct ras_common_if *ras_if; struct amdgpu_sdma_ras *ras; uint32_t *ip_dump; + uint32_t supported_reset; }; /* @@ -175,5 +176,7 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance, void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev, bool duplicate); int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev); - +void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev); +int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev); +void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 74adb983ab03..c8180cad0abd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -61,7 +61,7 @@ #include "amdgpu_res_cursor.h" #include "bif/bif_4_1_d.h" -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); #define AMDGPU_TTM_VRAM_MAX_DW_READ ((size_t)128) @@ -812,7 +812,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev, /* Map SG to device */ r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); if (r) - goto release_sg; + goto release_sg_table; /* convert SG to linear array of pages and dma addresses */ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, @@ -820,6 +820,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev, return 0; +release_sg_table: + sg_free_table(ttm->sg); release_sg: kfree(ttm->sg); ttm->sg = NULL; @@ -1849,6 +1851,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) mutex_init(&adev->mman.gtt_window_lock); + dma_set_max_seg_size(adev->dev, UINT_MAX); /* No others user of address space so set it to 0 */ r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev, adev_to_drm(adev)->anon_inode->i_mapping, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 138d80017f35..2852a6064c9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -34,6 +34,7 @@ #define AMDGPU_PL_OA (TTM_PL_PRIV + 2) #define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3) #define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4) +#define __AMDGPU_PL_LAST (TTM_PL_PRIV + 4) #define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 4e23419b92d4..4150ec0aa10d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -163,6 +163,7 @@ enum ta_fw_type { TA_FW_TYPE_PSP_DTM, TA_FW_TYPE_PSP_RAP, TA_FW_TYPE_PSP_SECUREDISPLAY, + TA_FW_TYPE_PSP_XGMI_AUX, TA_FW_TYPE_MAX_INDEX, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index bb7b9b2eaac1..896f3609b0ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -318,6 +318,9 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r if (r) return r; + if (amdgpu_sriov_vf(adev)) + return r; + if (amdgpu_ras_is_supported(adev, ras_block->block)) { r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c index 6162582d0aa2..bd2d3863c3ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c @@ -765,9 +765,9 @@ static int umsch_mm_init(struct amdgpu_device *adev) } -static int umsch_mm_early_init(void *handle) +static int umsch_mm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { case IP_VERSION(4, 0, 5): @@ -784,9 +784,9 @@ static int umsch_mm_early_init(void *handle) return 0; } -static int umsch_mm_late_init(void *handle) +static int umsch_mm_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend) return 0; @@ -794,9 +794,9 @@ static int umsch_mm_late_init(void *handle) return umsch_mm_test(adev); } -static int 
umsch_mm_sw_init(void *handle) +static int umsch_mm_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = umsch_mm_init(adev); @@ -815,9 +815,9 @@ static int umsch_mm_sw_init(void *handle) return 0; } -static int umsch_mm_sw_fini(void *handle) +static int umsch_mm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; release_firmware(adev->umsch_mm.fw); adev->umsch_mm.fw = NULL; @@ -839,9 +839,9 @@ static int umsch_mm_sw_fini(void *handle) return 0; } -static int umsch_mm_hw_init(void *handle) +static int umsch_mm_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = umsch_mm_load_microcode(&adev->umsch_mm); @@ -857,9 +857,9 @@ static int umsch_mm_hw_init(void *handle) return 0; } -static int umsch_mm_hw_fini(void *handle) +static int umsch_mm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; umsch_mm_ring_stop(&adev->umsch_mm); @@ -873,18 +873,14 @@ static int umsch_mm_hw_fini(void *handle) return 0; } -static int umsch_mm_suspend(void *handle) +static int umsch_mm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return umsch_mm_hw_fini(adev); + return umsch_mm_hw_fini(ip_block); } -static int umsch_mm_resume(void *handle) +static int umsch_mm_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return umsch_mm_hw_init(adev); + return umsch_mm_hw_init(ip_block); } void amdgpu_umsch_fwlog_init(struct amdgpu_umsch_mm *umsch_mm) @@ -997,8 +993,6 @@ static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = { .hw_fini = umsch_mm_hw_fini, .suspend = umsch_mm_suspend, .resume = umsch_mm_resume, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 74fdbf71d95b..599d3ca4e0ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -214,15 +214,15 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) drm_sched_entity_destroy(&adev->vce.entity); - amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr, - (void **)&adev->vce.cpu_addr); - for (i = 0; i < adev->vce.num_rings; i++) amdgpu_ring_fini(&adev->vce.ring[i]); amdgpu_ucode_release(&adev->vce.fw); mutex_destroy(&adev->vce.idle_mutex); + amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr, + (void **)&adev->vce.cpu_addr); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 43f44cc201cb..3e94c3ba1ba2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -294,21 +294,12 @@ bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type t return ret; } -int amdgpu_vcn_suspend(struct amdgpu_device *adev) +int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev) { unsigned int size; void *ptr; int i, idx; - bool in_ras_intr = amdgpu_ras_intr_triggered(); - - cancel_delayed_work_sync(&adev->vcn.idle_work); - - /* err_event_athub will corrupt VCPU buffer, so we need to - * 
restore fw data and clear buffer in amdgpu_vcn_resume() */ - if (in_ras_intr) - return 0; - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; @@ -327,9 +318,24 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev) drm_dev_exit(idx); } } + return 0; } +int amdgpu_vcn_suspend(struct amdgpu_device *adev) +{ + bool in_ras_intr = amdgpu_ras_intr_triggered(); + + cancel_delayed_work_sync(&adev->vcn.idle_work); + + /* err_event_athub will corrupt VCPU buffer, so we need to + * restore fw data and clear buffer in amdgpu_vcn_resume() */ + if (in_ras_intr) + return 0; + + return amdgpu_vcn_save_vcpu_bo(adev); +} + int amdgpu_vcn_resume(struct amdgpu_device *adev) { unsigned int size; @@ -1277,3 +1283,40 @@ int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx, return psp_execute_ip_fw_load(&adev->psp, &ucode); } + +static ssize_t amdgpu_get_vcn_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->vcn.supported_reset); +} + +static DEVICE_ATTR(vcn_reset_mask, 0444, + amdgpu_get_vcn_reset_mask, NULL); + +int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev) +{ + int r = 0; + + if (adev->vcn.num_vcn_inst) { + r = device_create_file(adev->dev, &dev_attr_vcn_reset_mask); + if (r) + return r; + } + + return r; +} + +void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev) +{ + if (adev->dev->kobj.sd) { + if (adev->vcn.num_vcn_inst) + device_remove_file(adev->dev, &dev_attr_vcn_reset_mask); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 2a1f3dbb14d3..1e32311c1dff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -333,6 +333,8 @@ struct amdgpu_vcn { /* IP reg dump */ uint32_t *ip_dump; + + uint32_t supported_reset; }; struct amdgpu_fw_shared_rb_ptrs_struct { @@ -518,5 +520,8 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev); int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx, enum AMDGPU_UCODE_ID ucode_id); +int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev); +int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev); +void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index b6397d3229e1..c704e9803e11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -523,6 +523,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) adev->unique_id = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid; + adev->virt.ras_en_caps.all = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_en_caps.all; + adev->virt.ras_telemetry_en_caps.all = + ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_telemetry_en_caps.all; break; default: dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version); @@ -703,6 +706,8 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev) adev->virt.fw_reserve.p_vf2pf = (struct amd_sriov_msg_vf2pf_info_header *) (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); + adev->virt.fw_reserve.ras_telemetry = + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10)); } else if (adev->mman.drv_vram_usage_va) { 
adev->virt.fw_reserve.p_pf2vf = (struct amd_sriov_msg_pf2vf_info_header *) @@ -710,6 +715,8 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev) adev->virt.fw_reserve.p_vf2pf = (struct amd_sriov_msg_vf2pf_info_header *) (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); + adev->virt.fw_reserve.ras_telemetry = + (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10)); } amdgpu_virt_read_pf2vf_data(adev); @@ -1144,3 +1151,185 @@ bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev) return xnack_mode; } + +bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + if (!amdgpu_sriov_ras_caps_en(adev)) + return false; + + if (adev->virt.ras_en_caps.bits.block_umc) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__UMC); + if (adev->virt.ras_en_caps.bits.block_sdma) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SDMA); + if (adev->virt.ras_en_caps.bits.block_gfx) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__GFX); + if (adev->virt.ras_en_caps.bits.block_mmhub) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MMHUB); + if (adev->virt.ras_en_caps.bits.block_athub) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__ATHUB); + if (adev->virt.ras_en_caps.bits.block_pcie_bif) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__PCIE_BIF); + if (adev->virt.ras_en_caps.bits.block_hdp) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__HDP); + if (adev->virt.ras_en_caps.bits.block_xgmi_wafl) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__XGMI_WAFL); + if (adev->virt.ras_en_caps.bits.block_df) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__DF); + if (adev->virt.ras_en_caps.bits.block_smn) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SMN); + if (adev->virt.ras_en_caps.bits.block_sem) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SEM); + if (adev->virt.ras_en_caps.bits.block_mp0) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP0); + if (adev->virt.ras_en_caps.bits.block_mp1) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP1); + if (adev->virt.ras_en_caps.bits.block_fuse) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__FUSE); + if (adev->virt.ras_en_caps.bits.block_mca) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MCA); + if (adev->virt.ras_en_caps.bits.block_vcn) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__VCN); + if (adev->virt.ras_en_caps.bits.block_jpeg) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__JPEG); + if (adev->virt.ras_en_caps.bits.block_ih) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__IH); + if (adev->virt.ras_en_caps.bits.block_mpio) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MPIO); + + if (adev->virt.ras_en_caps.bits.poison_propogation_mode) + con->poison_supported = true; /* Poison is handled by host */ + + return true; +} + +static inline enum amd_sriov_ras_telemetry_gpu_block +amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block block) { + switch (block) { + case AMDGPU_RAS_BLOCK__UMC: + return RAS_TELEMETRY_GPU_BLOCK_UMC; + case AMDGPU_RAS_BLOCK__SDMA: + return RAS_TELEMETRY_GPU_BLOCK_SDMA; + case AMDGPU_RAS_BLOCK__GFX: + return RAS_TELEMETRY_GPU_BLOCK_GFX; + case AMDGPU_RAS_BLOCK__MMHUB: + return RAS_TELEMETRY_GPU_BLOCK_MMHUB; + case AMDGPU_RAS_BLOCK__ATHUB: + return RAS_TELEMETRY_GPU_BLOCK_ATHUB; + case AMDGPU_RAS_BLOCK__PCIE_BIF: + return RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF; + case AMDGPU_RAS_BLOCK__HDP: + return RAS_TELEMETRY_GPU_BLOCK_HDP; + case AMDGPU_RAS_BLOCK__XGMI_WAFL: + return RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL; + case 
AMDGPU_RAS_BLOCK__DF: + return RAS_TELEMETRY_GPU_BLOCK_DF; + case AMDGPU_RAS_BLOCK__SMN: + return RAS_TELEMETRY_GPU_BLOCK_SMN; + case AMDGPU_RAS_BLOCK__SEM: + return RAS_TELEMETRY_GPU_BLOCK_SEM; + case AMDGPU_RAS_BLOCK__MP0: + return RAS_TELEMETRY_GPU_BLOCK_MP0; + case AMDGPU_RAS_BLOCK__MP1: + return RAS_TELEMETRY_GPU_BLOCK_MP1; + case AMDGPU_RAS_BLOCK__FUSE: + return RAS_TELEMETRY_GPU_BLOCK_FUSE; + case AMDGPU_RAS_BLOCK__MCA: + return RAS_TELEMETRY_GPU_BLOCK_MCA; + case AMDGPU_RAS_BLOCK__VCN: + return RAS_TELEMETRY_GPU_BLOCK_VCN; + case AMDGPU_RAS_BLOCK__JPEG: + return RAS_TELEMETRY_GPU_BLOCK_JPEG; + case AMDGPU_RAS_BLOCK__IH: + return RAS_TELEMETRY_GPU_BLOCK_IH; + case AMDGPU_RAS_BLOCK__MPIO: + return RAS_TELEMETRY_GPU_BLOCK_MPIO; + default: + dev_err(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n", block); + return RAS_TELEMETRY_GPU_BLOCK_COUNT; + } +} + +static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev, + struct amdsriov_ras_telemetry *host_telemetry) +{ + struct amd_sriov_ras_telemetry_error_count *tmp = NULL; + uint32_t checksum, used_size; + + checksum = host_telemetry->header.checksum; + used_size = host_telemetry->header.used_size; + + if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + return 0; + + tmp = kmalloc(used_size, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + memcpy(tmp, &host_telemetry->body.error_count, used_size); + + if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0)) + goto out; + + memcpy(&adev->virt.count_cache, tmp, + min(used_size, sizeof(adev->virt.count_cache))); +out: + kfree(tmp); + + return 0; +} + +static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bool force_update) +{ + struct amdgpu_virt *virt = &adev->virt; + + /* Host allows 15 ras telemetry requests per 60 seconds. After which, the Host + * will ignore incoming guest messages. Ratelimit the guest messages to + * prevent guest self-DoS. + */ + if (__ratelimit(&adev->virt.ras_telemetry_rs) || force_update) { + if (!virt->ops->req_ras_err_count(adev)) + amdgpu_virt_cache_host_error_counts(adev, + adev->virt.fw_reserve.ras_telemetry); + } + + return 0; +} + +/* Bypass ACA interface and query ECC counts directly from host */ +int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block, + struct ras_err_data *err_data) +{ + enum amd_sriov_ras_telemetry_gpu_block sriov_block; + + sriov_block = amdgpu_ras_block_to_sriov(adev, block); + + if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT || + !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block)) + return -EOPNOTSUPP; + + /* Host access may be lost during reset; just return the last cached data.
*/ + if (down_read_trylock(&adev->reset_domain->sem)) { + amdgpu_virt_req_ras_err_count_internal(adev, false); + up_read(&adev->reset_domain->sem); + } + + err_data->ue_count = adev->virt.count_cache.block[sriov_block].ue_count; + err_data->ce_count = adev->virt.count_cache.block[sriov_block].ce_count; + err_data->de_count = adev->virt.count_cache.block[sriov_block].de_count; + + return 0; +} + +int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev) +{ + unsigned long ue_count, ce_count; + + if (amdgpu_sriov_ras_telemetry_en(adev)) { + amdgpu_virt_req_ras_err_count_internal(adev, true); + amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL); + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index b650a2032c42..5381b8d596e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -95,6 +95,7 @@ struct amdgpu_virt_ops { void (*ras_poison_handler)(struct amdgpu_device *adev, enum amdgpu_ras_block block); bool (*rcvd_ras_intr)(struct amdgpu_device *adev); + int (*req_ras_err_count)(struct amdgpu_device *adev); }; /* @@ -103,6 +104,7 @@ struct amdgpu_virt_ops { struct amdgpu_virt_fw_reserve { struct amd_sriov_msg_pf2vf_info_header *p_pf2vf; struct amd_sriov_msg_vf2pf_info_header *p_vf2pf; + void *ras_telemetry; unsigned int checksum_key; }; @@ -136,6 +138,8 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7), /* MES info */ AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8), + AMDGIM_FEATURE_RAS_CAPS = (1 << 9), + AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10), }; enum AMDGIM_REG_ACCESS_FLAG { @@ -276,6 +280,12 @@ struct amdgpu_virt { uint32_t autoload_ucode_id; struct mutex rlcg_reg_lock; + + union amd_sriov_ras_caps ras_en_caps; + union amd_sriov_ras_caps ras_telemetry_en_caps; + + struct ratelimit_state ras_telemetry_rs; + struct amd_sriov_ras_telemetry_error_count count_cache; }; struct amdgpu_video_codec_info; @@ -320,6 +330,15 @@ struct amdgpu_video_codec_info; #define amdgpu_sriov_vf_mmio_access_protection(adev) \ ((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT) +#define amdgpu_sriov_ras_caps_en(adev) \ +((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CAPS) + +#define amdgpu_sriov_ras_telemetry_en(adev) \ +(((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_TELEMETRY) && (adev)->virt.fw_reserve.ras_telemetry) + +#define amdgpu_sriov_ras_telemetry_block_en(adev, sriov_blk) \ +(amdgpu_sriov_ras_telemetry_en((adev)) && (adev)->virt.ras_telemetry_en_caps.all & BIT(sriov_blk)) + static inline bool is_virtual_machine(void) { #if defined(CONFIG_X86) @@ -383,4 +402,8 @@ bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip, bool write, u32 *rlcg_flag); u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id); +bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev); +int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block, + struct ras_err_data *err_data); +int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index d4c2afafbb73..8bf28d336807 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -493,10 +493,10 @@ const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static int amdgpu_vkms_sw_init(void *handle) 
+static int amdgpu_vkms_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc, sizeof(struct amdgpu_vkms_output), GFP_KERNEL); @@ -536,9 +536,9 @@ static int amdgpu_vkms_sw_init(void *handle) return 0; } -static int amdgpu_vkms_sw_fini(void *handle) +static int amdgpu_vkms_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i = 0; for (i = 0; i < adev->mode_info.num_crtc; i++) @@ -555,9 +555,9 @@ static int amdgpu_vkms_sw_fini(void *handle) return 0; } -static int amdgpu_vkms_hw_init(void *handle) +static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_SI @@ -600,31 +600,31 @@ static int amdgpu_vkms_hw_init(void *handle) return 0; } -static int amdgpu_vkms_hw_fini(void *handle) +static int amdgpu_vkms_hw_fini(struct amdgpu_ip_block *ip_block) { return 0; } -static int amdgpu_vkms_suspend(void *handle) +static int amdgpu_vkms_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = drm_mode_config_helper_suspend(adev_to_drm(adev)); if (r) return r; - return amdgpu_vkms_hw_fini(handle); + + return 0; } -static int amdgpu_vkms_resume(void *handle) +static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_vkms_hw_init(handle); + r = amdgpu_vkms_hw_init(ip_block); if (r) return r; - return drm_mode_config_helper_resume(adev_to_drm(adev)); + return drm_mode_config_helper_resume(adev_to_drm(ip_block->adev)); } static bool amdgpu_vkms_is_idle(void *handle) @@ -632,16 +632,6 @@ static bool amdgpu_vkms_is_idle(void *handle) return true; } -static int amdgpu_vkms_wait_for_idle(void *handle) -{ - return 0; -} - -static int amdgpu_vkms_soft_reset(void *handle) -{ - return 0; -} - static int amdgpu_vkms_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -656,8 +646,6 @@ static int amdgpu_vkms_set_powergating_state(void *handle, static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = { .name = "amdgpu_vkms", - .early_init = NULL, - .late_init = NULL, .sw_init = amdgpu_vkms_sw_init, .sw_fini = amdgpu_vkms_sw_fini, .hw_init = amdgpu_vkms_hw_init, @@ -665,12 +653,8 @@ static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = { .suspend = amdgpu_vkms_suspend, .resume = amdgpu_vkms_resume, .is_idle = amdgpu_vkms_is_idle, - .wait_for_idle = amdgpu_vkms_wait_for_idle, - .soft_reset = amdgpu_vkms_soft_reset, .set_clockgating_state = amdgpu_vkms_set_clockgating_state, .set_powergating_state = amdgpu_vkms_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6005280f5f38..8d9bf7a0857f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1083,7 +1083,8 @@ error_free: } static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va, - struct amdgpu_mem_stats *stats) + struct amdgpu_mem_stats *stats, + unsigned int size) { struct 
amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo *bo = bo_va->base.bo; @@ -1099,34 +1100,35 @@ static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va, !dma_resv_trylock(bo->tbo.base.resv)) return; - amdgpu_bo_get_memory(bo, stats); + amdgpu_bo_get_memory(bo, stats, size); if (!amdgpu_vm_is_bo_always_valid(vm, bo)) dma_resv_unlock(bo->tbo.base.resv); } void amdgpu_vm_get_memory(struct amdgpu_vm *vm, - struct amdgpu_mem_stats *stats) + struct amdgpu_mem_stats *stats, + unsigned int size) { struct amdgpu_bo_va *bo_va, *tmp; spin_lock(&vm->status_lock); list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); spin_unlock(&vm->status_lock); } @@ -1159,7 +1161,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, int r; amdgpu_sync_create(&sync); - if (clear || !bo) { + if (clear) { mem = NULL; /* Implicitly sync to command submissions in the same VM before @@ -1174,6 +1176,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, if (r) goto error_free; } + } else if (!bo) { + mem = NULL; + + /* PRT map operations don't need to sync to anything. 
*/ } else { struct drm_gem_object *obj = &bo->tbo.base; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 52dd7cdfdc81..5d119ac26c4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -42,7 +42,6 @@ struct amdgpu_bo_va; struct amdgpu_job; struct amdgpu_bo_list_entry; struct amdgpu_bo_vm; -struct amdgpu_mem_stats; /* * GPUVM handling @@ -322,6 +321,16 @@ struct amdgpu_vm_fault_info { unsigned int vmhub; }; +struct amdgpu_mem_stats { + struct drm_memory_stats drm; + + /* buffers that requested this placement */ + uint64_t requested; + /* buffers that requested this placement + * but are currently evicted */ + uint64_t evicted; +}; + struct amdgpu_vm { /* tree of virtual addresses mapped */ struct rb_root_cached va; @@ -567,7 +576,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_memory(struct amdgpu_vm *vm, - struct amdgpu_mem_stats *stats); + struct amdgpu_mem_stats *stats, + unsigned int size); int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_vm *vmbo, bool immediate); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c index 5acd20ff5979..110b120d7375 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c @@ -295,9 +295,9 @@ int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe) return 0; } -static int vpe_early_init(void *handle) +static int vpe_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { @@ -356,9 +356,9 @@ static int vpe_common_init(struct amdgpu_vpe *vpe) return 0; } -static int vpe_sw_init(void *handle) +static int vpe_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; int ret; @@ -377,18 +377,26 @@ static int vpe_sw_init(void *handle) ret = vpe_init_microcode(vpe); if (ret) goto out; + + /* TODO: Add queue reset mask when FW fully supports it */ + adev->vpe.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vpe.ring); + ret = amdgpu_vpe_sysfs_reset_mask_init(adev); + if (ret) + goto out; out: return ret; } -static int vpe_sw_fini(void *handle) +static int vpe_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; release_firmware(vpe->fw); vpe->fw = NULL; + amdgpu_vpe_sysfs_reset_mask_fini(adev); vpe_ring_fini(vpe); amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj, @@ -398,9 +406,9 @@ static int vpe_sw_fini(void *handle) return 0; } -static int vpe_hw_init(void *handle) +static int vpe_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; int ret; @@ -421,9 +429,9 @@ static int vpe_hw_init(void *handle) return 0; } -static int vpe_hw_fini(void *handle) +static int vpe_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; 
vpe_ring_stop(vpe); @@ -434,20 +442,18 @@ static int vpe_hw_fini(void *handle) return 0; } -static int vpe_suspend(void *handle) +static int vpe_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vpe.idle_work); - return vpe_hw_fini(adev); + return vpe_hw_fini(ip_block); } -static int vpe_resume(void *handle) +static int vpe_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vpe_hw_init(adev); + return vpe_hw_init(ip_block); } static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) @@ -867,6 +873,43 @@ static void vpe_ring_end_use(struct amdgpu_ring *ring) schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); } +static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->vpe.supported_reset); +} + +static DEVICE_ATTR(vpe_reset_mask, 0444, + amdgpu_get_vpe_reset_mask, NULL); + +int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev) +{ + int r = 0; + + if (adev->vpe.num_instances) { + r = device_create_file(adev->dev, &dev_attr_vpe_reset_mask); + if (r) + return r; + } + + return r; +} + +void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev) +{ + if (adev->dev->kobj.sd) { + if (adev->vpe.num_instances) + device_remove_file(adev->dev, &dev_attr_vpe_reset_mask); + } +} + static const struct amdgpu_ring_funcs vpe_ring_funcs = { .type = AMDGPU_RING_TYPE_VPE, .align_mask = 0xf, @@ -908,14 +951,12 @@ static void vpe_set_ring_funcs(struct amdgpu_device *adev) const struct amd_ip_funcs vpe_ip_funcs = { .name = "vpe_v6_1", .early_init = vpe_early_init, - .late_init = NULL, .sw_init = vpe_sw_init, .sw_fini = vpe_sw_fini, .hw_init = vpe_hw_init, .hw_fini = vpe_hw_fini, .suspend = vpe_suspend, .resume = vpe_resume, - .soft_reset = NULL, .set_clockgating_state = vpe_set_clockgating_state, .set_powergating_state = vpe_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h index 231d86d0953e..695da740a97e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h @@ -79,6 +79,7 @@ struct amdgpu_vpe { uint32_t num_instances; bool collaborate_mode; + uint32_t supported_reset; }; int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev); @@ -86,6 +87,8 @@ int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe); int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe); int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe); int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe); +void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev); +int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev); #define vpe_ring_init(vpe) ((vpe)->funcs->ring_init ? (vpe)->funcs->ring_init((vpe)) : 0) #define vpe_ring_start(vpe) ((vpe)->funcs->ring_start ? 
(vpe)->funcs->ring_start((vpe)) : 0) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index a6d456ec6aeb..e209b5e101df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -433,3 +433,292 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev, } } +#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \ + static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \ + struct amdgpu_xcp_res_details *xcp_res, char *buf) \ + { \ + return sysfs_emit(buf, "%d\n", xcp_res->_name); \ + } + +struct amdgpu_xcp_res_sysfs_attribute { + struct attribute attr; + ssize_t (*show)(struct amdgpu_xcp_res_details *xcp_res, char *buf); +}; + +#define XCP_CFG_SYSFS_RES_ATTR(_name) \ + struct amdgpu_xcp_res_sysfs_attribute xcp_res_sysfs_attr_##_name = { \ + .attr = { .name = __stringify(_name), .mode = 0400 }, \ + .show = amdgpu_xcp_res_sysfs_##_name##_show, \ + } + +XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst) +XCP_CFG_SYSFS_RES_ATTR(num_inst); +XCP_CFG_SYSFS_RES_ATTR_SHOW(num_shared) +XCP_CFG_SYSFS_RES_ATTR(num_shared); + +#define XCP_CFG_SYSFS_RES_ATTR_PTR(_name) xcp_res_sysfs_attr_##_name.attr + +static struct attribute *xcp_cfg_res_sysfs_attrs[] = { + &XCP_CFG_SYSFS_RES_ATTR_PTR(num_inst), + &XCP_CFG_SYSFS_RES_ATTR_PTR(num_shared), NULL +}; + +static const char *xcp_desc[] = { + [AMDGPU_SPX_PARTITION_MODE] = "SPX", + [AMDGPU_DPX_PARTITION_MODE] = "DPX", + [AMDGPU_TPX_PARTITION_MODE] = "TPX", + [AMDGPU_QPX_PARTITION_MODE] = "QPX", + [AMDGPU_CPX_PARTITION_MODE] = "CPX", +}; + +static const char *nps_desc[] = { + [UNKNOWN_MEMORY_PARTITION_MODE] = "UNKNOWN", + [AMDGPU_NPS1_PARTITION_MODE] = "NPS1", + [AMDGPU_NPS2_PARTITION_MODE] = "NPS2", + [AMDGPU_NPS3_PARTITION_MODE] = "NPS3", + [AMDGPU_NPS4_PARTITION_MODE] = "NPS4", + [AMDGPU_NPS6_PARTITION_MODE] = "NPS6", + [AMDGPU_NPS8_PARTITION_MODE] = "NPS8", +}; + +ATTRIBUTE_GROUPS(xcp_cfg_res_sysfs); + +#define to_xcp_attr(x) \ + container_of(x, struct amdgpu_xcp_res_sysfs_attribute, attr) +#define to_xcp_res(x) container_of(x, struct amdgpu_xcp_res_details, kobj) + +static ssize_t xcp_cfg_res_sysfs_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct amdgpu_xcp_res_sysfs_attribute *attribute; + struct amdgpu_xcp_res_details *xcp_res; + + attribute = to_xcp_attr(attr); + xcp_res = to_xcp_res(kobj); + + if (!attribute->show) + return -EIO; + + return attribute->show(xcp_res, buf); +} + +static const struct sysfs_ops xcp_cfg_res_sysfs_ops = { + .show = xcp_cfg_res_sysfs_attr_show, +}; + +static const struct kobj_type xcp_cfg_res_sysfs_ktype = { + .sysfs_ops = &xcp_cfg_res_sysfs_ops, + .default_groups = xcp_cfg_res_sysfs_groups, +}; + +const char *xcp_res_names[] = { + [AMDGPU_XCP_RES_XCC] = "xcc", + [AMDGPU_XCP_RES_DMA] = "dma", + [AMDGPU_XCP_RES_DEC] = "dec", + [AMDGPU_XCP_RES_JPEG] = "jpeg", +}; + +static int amdgpu_xcp_get_res_info(struct amdgpu_xcp_mgr *xcp_mgr, + int mode, + struct amdgpu_xcp_cfg *xcp_cfg) +{ + if (xcp_mgr->funcs && xcp_mgr->funcs->get_xcp_res_info) + return xcp_mgr->funcs->get_xcp_res_info(xcp_mgr, mode, xcp_cfg); + + return -EOPNOTSUPP; +} + +#define to_xcp_cfg(x) container_of(x, struct amdgpu_xcp_cfg, kobj) +static ssize_t supported_xcp_configs_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + struct amdgpu_xcp_mgr *xcp_mgr = xcp_cfg->xcp_mgr; + int size = 0, mode; + char *sep = ""; + + if (!xcp_mgr || !xcp_mgr->supp_xcp_modes) + return sysfs_emit(buf, "Not 
supported\n"); + + for_each_inst(mode, xcp_mgr->supp_xcp_modes) { + size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]); + sep = ", "; + } + + size += sysfs_emit_at(buf, size, "\n"); + + return size; +} + +static ssize_t supported_nps_configs_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + int size = 0, mode; + char *sep = ""; + + if (!xcp_cfg || !xcp_cfg->compatible_nps_modes) + return sysfs_emit(buf, "Not supported\n"); + + for_each_inst(mode, xcp_cfg->compatible_nps_modes) { + size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]); + sep = ", "; + } + + size += sysfs_emit_at(buf, size, "\n"); + + return size; +} + +static ssize_t xcp_config_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + + return sysfs_emit(buf, "%s\n", + amdgpu_gfx_compute_mode_desc(xcp_cfg->mode)); +} + +static ssize_t xcp_config_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t size) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + int mode, r; + + if (!strncasecmp("SPX", buf, strlen("SPX"))) + mode = AMDGPU_SPX_PARTITION_MODE; + else if (!strncasecmp("DPX", buf, strlen("DPX"))) + mode = AMDGPU_DPX_PARTITION_MODE; + else if (!strncasecmp("TPX", buf, strlen("TPX"))) + mode = AMDGPU_TPX_PARTITION_MODE; + else if (!strncasecmp("QPX", buf, strlen("QPX"))) + mode = AMDGPU_QPX_PARTITION_MODE; + else if (!strncasecmp("CPX", buf, strlen("CPX"))) + mode = AMDGPU_CPX_PARTITION_MODE; + else + return -EINVAL; + + r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg); + + if (r) + return r; + + xcp_cfg->mode = mode; + return size; +} + +static struct kobj_attribute xcp_cfg_sysfs_mode = + __ATTR_RW_MODE(xcp_config, 0644); + +static void xcp_cfg_sysfs_release(struct kobject *kobj) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + + kfree(xcp_cfg); +} + +static const struct kobj_type xcp_cfg_sysfs_ktype = { + .release = xcp_cfg_sysfs_release, + .sysfs_ops = &kobj_sysfs_ops, +}; + +static struct kobj_attribute supp_part_sysfs_mode = + __ATTR_RO(supported_xcp_configs); + +static struct kobj_attribute supp_nps_sysfs_mode = + __ATTR_RO(supported_nps_configs); + +static const struct attribute *xcp_attrs[] = { + &supp_part_sysfs_mode.attr, + &xcp_cfg_sysfs_mode.attr, + NULL, +}; + +void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev) +{ + struct amdgpu_xcp_res_details *xcp_res; + struct amdgpu_xcp_cfg *xcp_cfg; + int i, r, j, rid, mode; + + if (!adev->xcp_mgr) + return; + + xcp_cfg = kzalloc(sizeof(*xcp_cfg), GFP_KERNEL); + if (!xcp_cfg) + return; + xcp_cfg->xcp_mgr = adev->xcp_mgr; + + r = kobject_init_and_add(&xcp_cfg->kobj, &xcp_cfg_sysfs_ktype, + &adev->dev->kobj, "compute_partition_config"); + if (r) + goto err1; + + r = sysfs_create_files(&xcp_cfg->kobj, xcp_attrs); + if (r) + goto err1; + + if (adev->gmc.supported_nps_modes != 0) { + r = sysfs_create_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr); + if (r) { + sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs); + goto err1; + } + } + + mode = (xcp_cfg->xcp_mgr->mode == + AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) ? 
+ AMDGPU_SPX_PARTITION_MODE : + xcp_cfg->xcp_mgr->mode; + r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg); + if (r) { + sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr); + sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs); + goto err1; + } + + xcp_cfg->mode = mode; + for (i = 0; i < xcp_cfg->num_res; i++) { + xcp_res = &xcp_cfg->xcp_res[i]; + rid = xcp_res->id; + r = kobject_init_and_add(&xcp_res->kobj, + &xcp_cfg_res_sysfs_ktype, + &xcp_cfg->kobj, "%s", + xcp_res_names[rid]); + if (r) + goto err; + } + + adev->xcp_mgr->xcp_cfg = xcp_cfg; + return; +err: + for (j = 0; j < i; j++) { + xcp_res = &xcp_cfg->xcp_res[j]; + kobject_put(&xcp_res->kobj); + } + + sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr); + sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs); +err1: + kobject_put(&xcp_cfg->kobj); +} + +void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev) +{ + struct amdgpu_xcp_res_details *xcp_res; + struct amdgpu_xcp_cfg *xcp_cfg; + int i; + + if (!adev->xcp_mgr) + return; + + xcp_cfg = adev->xcp_mgr->xcp_cfg; + for (i = 0; i < xcp_cfg->num_res; i++) { + xcp_res = &xcp_cfg->xcp_res[i]; + kobject_put(&xcp_res->kobj); + } + + sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr); + sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs); + kobject_put(&xcp_cfg->kobj); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 32775260556f..b63f53242c57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -56,6 +56,30 @@ enum AMDGPU_XCP_STATE { AMDGPU_XCP_RESUME, }; +enum amdgpu_xcp_res_id { + AMDGPU_XCP_RES_XCC, + AMDGPU_XCP_RES_DMA, + AMDGPU_XCP_RES_DEC, + AMDGPU_XCP_RES_JPEG, + AMDGPU_XCP_RES_MAX, +}; + +struct amdgpu_xcp_res_details { + enum amdgpu_xcp_res_id id; + u8 num_inst; + u8 num_shared; + struct kobject kobj; +}; + +struct amdgpu_xcp_cfg { + u8 mode; + struct amdgpu_xcp_res_details xcp_res[AMDGPU_XCP_RES_MAX]; + u8 num_res; + struct amdgpu_xcp_mgr *xcp_mgr; + struct kobject kobj; + u16 compatible_nps_modes; +}; + struct amdgpu_xcp_ip_funcs { int (*prepare_suspend)(void *handle, uint32_t inst_mask); int (*suspend)(void *handle, uint32_t inst_mask); @@ -97,6 +121,9 @@ struct amdgpu_xcp_mgr { /* Used to determine KFD memory size limits per XCP */ unsigned int num_xcp_per_mem_partition; + struct amdgpu_xcp_cfg *xcp_cfg; + uint32_t supp_xcp_modes; + uint32_t avail_xcp_modes; }; struct amdgpu_xcp_mgr_funcs { @@ -108,7 +135,9 @@ struct amdgpu_xcp_mgr_funcs { struct amdgpu_xcp_ip *ip); int (*get_xcp_mem_id)(struct amdgpu_xcp_mgr *xcp_mgr, struct amdgpu_xcp *xcp, uint8_t *mem_id); - + int (*get_xcp_res_info)(struct amdgpu_xcp_mgr *xcp_mgr, + int mode, + struct amdgpu_xcp_cfg *xcp_cfg); int (*prepare_suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); @@ -146,6 +175,9 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev, void amdgpu_xcp_release_sched(struct amdgpu_device *adev, struct amdgpu_ctx_entity *entity); +void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev); +void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev); + #define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \ ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \ (adev)->xcp_mgr->funcs->select_scheds ?
\ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 7de449fae1e3..74b4349e345a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -40,6 +40,11 @@ #define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210 #define smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK 0x12200218 +#define XGMI_STATE_DISABLE 0xD1 +#define XGMI_STATE_LS0 0x81 +#define XGMI_LINK_ACTIVE 1 +#define XGMI_LINK_INACTIVE 0 + static DEFINE_MUTEX(xgmi_mutex); #define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4 @@ -289,6 +294,42 @@ static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = { SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)}, }; +static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link_num) +{ + const u32 smnpcs_xgmi3x16_pcs_state_hist1 = 0x11a00070; + const int xgmi_inst = 2; + u32 link_inst; + u64 addr; + + link_inst = global_link_num % xgmi_inst; + + addr = (smnpcs_xgmi3x16_pcs_state_hist1 | (link_inst << 20)) + + adev->asic_funcs->encode_ext_smn_addressing(global_link_num / xgmi_inst); + + return RREG32_PCIE_EXT(addr); +} + +int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num) +{ + u32 xgmi_state_reg_val; + + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { + case IP_VERSION(6, 4, 0): + xgmi_state_reg_val = xgmi_v6_4_get_link_status(adev, global_link_num); + break; + default: + return -EOPNOTSUPP; + } + + if ((xgmi_state_reg_val & 0xFF) == XGMI_STATE_DISABLE) + return -ENOLINK; + + if ((xgmi_state_reg_val & 0xFF) == XGMI_STATE_LS0) + return XGMI_LINK_ACTIVE; + + return XGMI_LINK_INACTIVE; +} + /** * DOC: AMDGPU XGMI Support * @@ -667,6 +708,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) task_barrier_init(&hive->tb); hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN; hive->hi_req_gpu = NULL; + atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE); /* * hive pstate on boot is high in vega20 so we have to go to low @@ -800,6 +842,23 @@ int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev, return -EINVAL; } +bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev, + struct amdgpu_device *peer_adev) +{ + struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info; + int i; + + /* Sharing should always be enabled for non-SRIOV. */ + if (!amdgpu_sriov_vf(adev)) + return true; + + for (i = 0 ; i < top->num_nodes; ++i) + if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id) + return !!top->nodes[i].is_sharing_enabled; + + return false; +} + /* * Devices that support extended data require the entire hive to initialize with * the shared memory buffer flag set. 
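/*
 * Illustrative sketch, not part of the patch above: one way a caller could
 * consume the amdgpu_get_xgmi_link_status() helper added in the amdgpu_xgmi.c
 * hunk. Per that hunk, the helper returns XGMI_LINK_ACTIVE (1) when the PCS
 * state is LS0, XGMI_LINK_INACTIVE (0) for other states, -ENOLINK when the
 * link is disabled, and -EOPNOTSUPP on anything but XGMI v6.4.0. The function
 * example_dump_xgmi_links() and its link-count parameter are hypothetical;
 * the XGMI_LINK_* macros are file-local to amdgpu_xgmi.c, so a real caller
 * would have to live in that file.
 */
static void example_dump_xgmi_links(struct amdgpu_device *adev, int num_links)
{
	int i, status;

	for (i = 0; i < num_links; i++) {
		status = amdgpu_get_xgmi_link_status(adev, i);
		if (status == -EOPNOTSUPP)
			return; /* PCS state not readable on this ASIC */
		if (status == -ENOLINK)
			dev_info(adev->dev, "xgmi link %d: disabled\n", i);
		else
			dev_info(adev->dev, "xgmi link %d: %s\n", i,
				 status == XGMI_LINK_ACTIVE ? "active" : "inactive");
	}
}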
@@ -860,8 +919,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) if (!adev->gmc.xgmi.supported) return 0; - if (!adev->gmc.xgmi.pending_reset && - amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { + if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { ret = psp_xgmi_initialize(&adev->psp, false, true); if (ret) { dev_err(adev->dev, @@ -907,8 +965,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) task_barrier_add_task(&hive->tb); - if (!adev->gmc.xgmi.pending_reset && - amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { + if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { /* update node list for other device in the hive */ if (tmp_adev != adev) { @@ -985,7 +1042,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) } } - if (!ret && !adev->gmc.xgmi.pending_reset) + if (!ret) ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive); exit_unlock: @@ -1500,3 +1557,117 @@ int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev) return 0; } + +static void amdgpu_xgmi_reset_on_init_work(struct work_struct *work) +{ + struct amdgpu_hive_info *hive = + container_of(work, struct amdgpu_hive_info, reset_on_init_work); + struct amdgpu_reset_context reset_context; + struct amdgpu_device *tmp_adev; + struct list_head device_list; + int r; + + mutex_lock(&hive->hive_lock); + + INIT_LIST_HEAD(&device_list); + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) + list_add_tail(&tmp_adev->reset_list, &device_list); + + tmp_adev = list_first_entry(&device_list, struct amdgpu_device, + reset_list); + amdgpu_device_lock_reset_domain(tmp_adev->reset_domain); + + reset_context.method = AMD_RESET_METHOD_ON_INIT; + reset_context.reset_req_dev = tmp_adev; + reset_context.hive = hive; + reset_context.reset_device_list = &device_list; + set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); + set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); + + amdgpu_reset_do_xgmi_reset_on_init(&reset_context); + mutex_unlock(&hive->hive_lock); + amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); + + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + r = amdgpu_ras_init_badpage_info(tmp_adev); + if (r && r != -EHWPOISON) + dev_err(tmp_adev->dev, + "error during bad page data initialization"); + } +} + +static void amdgpu_xgmi_schedule_reset_on_init(struct amdgpu_hive_info *hive) +{ + INIT_WORK(&hive->reset_on_init_work, amdgpu_xgmi_reset_on_init_work); + amdgpu_reset_domain_schedule(hive->reset_domain, + &hive->reset_on_init_work); +} + +int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev) +{ + struct amdgpu_hive_info *hive; + bool reset_scheduled; + int num_devs; + + hive = amdgpu_get_xgmi_hive(adev); + if (!hive) + return -EINVAL; + + mutex_lock(&hive->hive_lock); + num_devs = atomic_read(&hive->number_devices); + reset_scheduled = false; + if (num_devs == adev->gmc.xgmi.num_physical_nodes) { + amdgpu_xgmi_schedule_reset_on_init(hive); + reset_scheduled = true; + } + + mutex_unlock(&hive->hive_lock); + amdgpu_put_xgmi_hive(hive); + + if (reset_scheduled) + flush_work(&hive->reset_on_init_work); + + return 0; +} + +int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev, + struct amdgpu_hive_info *hive, + int req_nps_mode) +{ + struct amdgpu_device *tmp_adev; + int cur_nps_mode, r; + + /* This is expected to be called only during unload of driver. The + * request needs to be placed only once for all devices in the hive. 
If + * one of them fails, revert the request for previous successful devices. + * After placing the request, set hive mode to UNKNOWN so that other + * devices don't request anymore. + */ + mutex_lock(&hive->hive_lock); + if (atomic_read(&hive->requested_nps_mode) == + UNKNOWN_MEMORY_PARTITION_MODE) { + dev_dbg(adev->dev, "Unexpected entry for hive NPS change"); + mutex_unlock(&hive->hive_lock); + return 0; + } + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + r = adev->gmc.gmc_funcs->request_mem_partition_mode( + tmp_adev, req_nps_mode); + if (r) + break; + } + if (r) { + /* Request back current mode if one of the requests failed */ + cur_nps_mode = + adev->gmc.gmc_funcs->query_mem_partition_mode(tmp_adev); + list_for_each_entry_continue_reverse( + tmp_adev, &hive->device_list, gmc.xgmi.head) + adev->gmc.gmc_funcs->request_mem_partition_mode( + tmp_adev, cur_nps_mode); + } + /* Set to UNKNOWN so that other devices don't request anymore */ + atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE); + mutex_unlock(&hive->hive_lock); + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index a3bfc16de6d4..d1282b4c6348 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -45,6 +45,8 @@ struct amdgpu_hive_info { struct amdgpu_reset_domain *reset_domain; atomic_t ras_recovery; struct ras_event_manager event_mgr; + struct work_struct reset_on_init_work; + atomic_t requested_nps_mode; }; struct amdgpu_pcs_ras_field { @@ -64,6 +66,8 @@ int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); +bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev, + struct amdgpu_device *peer_adev); uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr); static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, @@ -75,5 +79,12 @@ static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id); } int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev); +int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev); + +int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev, + struct amdgpu_hive_info *hive, + int req_nps_mode); +int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, + int global_link_num); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 6e9eeaeb3de1..b4f9c2f4e92c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -28,17 +28,21 @@ #define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64 #define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB #define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4 - +#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048 +#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB 2 +#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB 64 /* * layout - * 0 64KB 65KB 66KB - * | VBIOS | PF2VF | VF2PF | Bad Page | ... - * | 64KB | 1KB | 1KB | + * 0 64KB 65KB 66KB 68KB 132KB + * | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ... + * | 64KB | 1KB | 1KB | 2KB | 64KB | ...
*/ + #define AMD_SRIOV_MSG_SIZE_KB 1 #define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB #define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB) #define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB) +#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB) /* * PF2VF history log: @@ -86,30 +90,59 @@ enum amd_sriov_ucode_engine_id { union amd_sriov_msg_feature_flags { struct { - uint32_t error_log_collect : 1; - uint32_t host_load_ucodes : 1; - uint32_t host_flr_vramlost : 1; - uint32_t mm_bw_management : 1; - uint32_t pp_one_vf_mode : 1; - uint32_t reg_indirect_acc : 1; - uint32_t av1_support : 1; - uint32_t vcn_rb_decouple : 1; - uint32_t mes_info_enable : 1; - uint32_t reserved : 23; + uint32_t error_log_collect : 1; + uint32_t host_load_ucodes : 1; + uint32_t host_flr_vramlost : 1; + uint32_t mm_bw_management : 1; + uint32_t pp_one_vf_mode : 1; + uint32_t reg_indirect_acc : 1; + uint32_t av1_support : 1; + uint32_t vcn_rb_decouple : 1; + uint32_t mes_info_dump_enable : 1; + uint32_t ras_caps : 1; + uint32_t ras_telemetry : 1; + uint32_t reserved : 21; } flags; uint32_t all; }; union amd_sriov_reg_access_flags { struct { - uint32_t vf_reg_access_ih : 1; - uint32_t vf_reg_access_mmhub : 1; - uint32_t vf_reg_access_gc : 1; - uint32_t reserved : 29; + uint32_t vf_reg_access_ih : 1; + uint32_t vf_reg_access_mmhub : 1; + uint32_t vf_reg_access_gc : 1; + uint32_t reserved : 29; } flags; uint32_t all; }; +union amd_sriov_ras_caps { + struct { + uint64_t block_umc : 1; + uint64_t block_sdma : 1; + uint64_t block_gfx : 1; + uint64_t block_mmhub : 1; + uint64_t block_athub : 1; + uint64_t block_pcie_bif : 1; + uint64_t block_hdp : 1; + uint64_t block_xgmi_wafl : 1; + uint64_t block_df : 1; + uint64_t block_smn : 1; + uint64_t block_sem : 1; + uint64_t block_mp0 : 1; + uint64_t block_mp1 : 1; + uint64_t block_fuse : 1; + uint64_t block_mca : 1; + uint64_t block_vcn : 1; + uint64_t block_jpeg : 1; + uint64_t block_ih : 1; + uint64_t block_mpio : 1; + uint64_t poison_propogation_mode : 1; + uint64_t reserved : 44; + } bits; + uint64_t all; +}; + union amd_sriov_msg_os_info { struct { uint32_t windows : 1; @@ -158,7 +191,7 @@ struct amd_sriov_msg_pf2vf_info_header { uint32_t reserved[2]; }; -#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (49) +#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (55) struct amd_sriov_msg_pf2vf_info { /* header contains size and version */ struct amd_sriov_msg_pf2vf_info_header header; @@ -211,6 +244,12 @@ struct amd_sriov_msg_pf2vf_info { uint32_t pcie_atomic_ops_support_flags; /* Portion of GPU memory occupied by VF. MAX value is 65535, but set to uint32_t to maintain alignment with reserved size */ uint32_t gpu_capacity; + /* vf bdf on host pci tree for debug only */ + uint32_t bdf_on_host; + uint32_t more_bp; //Reserved for future use. 
+ union amd_sriov_ras_caps ras_en_caps; + union amd_sriov_ras_caps ras_telemetry_en_caps; + /* reserved */ uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE]; } __packed; @@ -283,8 +322,12 @@ enum amd_sriov_mailbox_request_message { MB_REQ_MSG_REL_GPU_FINI_ACCESS, MB_REQ_MSG_REQ_GPU_RESET_ACCESS, MB_REQ_MSG_REQ_GPU_INIT_DATA, + MB_REQ_MSG_PSP_VF_CMD_RELAY, MB_REQ_MSG_LOG_VF_ERROR = 200, + MB_REQ_MSG_READY_TO_RESET = 201, + MB_REQ_MSG_RAS_POISON = 202, + MB_REQ_RAS_ERROR_COUNT = 203, }; /* mailbox message send from host to guest */ @@ -297,10 +340,60 @@ enum amd_sriov_mailbox_response_message { MB_RES_MSG_FAIL, MB_RES_MSG_QUERY_ALIVE, MB_RES_MSG_GPU_INIT_DATA_READY, + MB_RES_MSG_RAS_ERROR_COUNT_READY = 11, MB_RES_MSG_TEXT_MESSAGE = 255 }; +enum amd_sriov_ras_telemetry_gpu_block { + RAS_TELEMETRY_GPU_BLOCK_UMC = 0, + RAS_TELEMETRY_GPU_BLOCK_SDMA = 1, + RAS_TELEMETRY_GPU_BLOCK_GFX = 2, + RAS_TELEMETRY_GPU_BLOCK_MMHUB = 3, + RAS_TELEMETRY_GPU_BLOCK_ATHUB = 4, + RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF = 5, + RAS_TELEMETRY_GPU_BLOCK_HDP = 6, + RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL = 7, + RAS_TELEMETRY_GPU_BLOCK_DF = 8, + RAS_TELEMETRY_GPU_BLOCK_SMN = 9, + RAS_TELEMETRY_GPU_BLOCK_SEM = 10, + RAS_TELEMETRY_GPU_BLOCK_MP0 = 11, + RAS_TELEMETRY_GPU_BLOCK_MP1 = 12, + RAS_TELEMETRY_GPU_BLOCK_FUSE = 13, + RAS_TELEMETRY_GPU_BLOCK_MCA = 14, + RAS_TELEMETRY_GPU_BLOCK_VCN = 15, + RAS_TELEMETRY_GPU_BLOCK_JPEG = 16, + RAS_TELEMETRY_GPU_BLOCK_IH = 17, + RAS_TELEMETRY_GPU_BLOCK_MPIO = 18, + RAS_TELEMETRY_GPU_BLOCK_COUNT = 19, +}; + +struct amd_sriov_ras_telemetry_header { + uint32_t checksum; + uint32_t used_size; + uint32_t reserved[2]; +}; + +struct amd_sriov_ras_telemetry_error_count { + struct { + uint32_t ce_count; + uint32_t ue_count; + uint32_t de_count; + uint32_t ce_overflow_count; + uint32_t ue_overflow_count; + uint32_t de_overflow_count; + uint32_t reserved[6]; + } block[RAS_TELEMETRY_GPU_BLOCK_COUNT]; +}; + +struct amdsriov_ras_telemetry { + struct amd_sriov_ras_telemetry_header header; + + union { + struct amd_sriov_ras_telemetry_error_count error_count; + } body; +}; + /* version data stored in MAILBOX_MSGBUF_RCV_DW1 for future expansion */ enum amd_sriov_gpu_init_data_version { GPU_INIT_DATA_READY_V1 = 1, diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 5e8833e4fed2..e157d6d857b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -447,6 +447,72 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x return 0; } +static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr, + int mode, + struct amdgpu_xcp_cfg *xcp_cfg) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + int max_res[AMDGPU_XCP_RES_MAX] = {}; + bool res_lt_xcp; + int num_xcp, i; + u16 nps_modes; + + if (!(xcp_mgr->supp_xcp_modes & BIT(mode))) + return -EINVAL; + + max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask); + max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances; + max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst; + max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst; + + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + num_xcp = 1; + nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE); + break; + case AMDGPU_DPX_PARTITION_MODE: + num_xcp = 2; + nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE); + break; + case AMDGPU_TPX_PARTITION_MODE: + num_xcp = 3; + nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS4_PARTITION_MODE); + break; + case 
AMDGPU_QPX_PARTITION_MODE: + num_xcp = 4; + nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS4_PARTITION_MODE); + break; + case AMDGPU_CPX_PARTITION_MODE: + num_xcp = NUM_XCC(adev->gfx.xcc_mask); + nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS4_PARTITION_MODE); + break; + default: + return -EINVAL; + } + + xcp_cfg->compatible_nps_modes = + (adev->gmc.supported_nps_modes & nps_modes); + xcp_cfg->num_res = ARRAY_SIZE(max_res); + + for (i = 0; i < xcp_cfg->num_res; i++) { + res_lt_xcp = max_res[i] < num_xcp; + xcp_cfg->xcp_res[i].id = i; + xcp_cfg->xcp_res[i].num_inst = + res_lt_xcp ? 1 : max_res[i] / num_xcp; + xcp_cfg->xcp_res[i].num_inst = + i == AMDGPU_XCP_RES_JPEG ? + xcp_cfg->xcp_res[i].num_inst * + adev->jpeg.num_jpeg_rings : xcp_cfg->xcp_res[i].num_inst; + xcp_cfg->xcp_res[i].num_shared = + res_lt_xcp ? num_xcp / max_res[i] : 1; + } + + return 0; +} + static enum amdgpu_gfx_partition __aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr) { @@ -482,7 +548,7 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, case AMDGPU_SPX_PARTITION_MODE: return adev->gmc.num_mem_partitions == 1 && num_xcc > 0; case AMDGPU_DPX_PARTITION_MODE: - return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0; + return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0; case AMDGPU_TPX_PARTITION_MODE: return (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 3) && @@ -530,6 +596,57 @@ static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, return ret; } +static void +__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + + xcp_mgr->supp_xcp_modes = 0; + + switch (NUM_XCC(adev->gfx.xcc_mask)) { + case 8: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_DPX_PARTITION_MODE) | + BIT(AMDGPU_QPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 6: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_TPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 4: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_DPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + /* this seems only existing in emulation phase */ + case 2: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 1: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + + default: + break; + } +} + +static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) +{ + int mode; + + xcp_mgr->avail_xcp_modes = 0; + + for_each_inst(mode, xcp_mgr->supp_xcp_modes) { + if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) + xcp_mgr->avail_xcp_modes |= BIT(mode); + } +} + static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode, int *num_xcps) { @@ -578,6 +695,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, amdgpu_xcp_init(xcp_mgr, *num_xcps, mode); ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags); + if (!ret) + __aqua_vanjaram_update_available_partition_mode(xcp_mgr); unlock: if (flags & AMDGPU_XCP_OPS_KFD) amdgpu_amdkfd_unlock_kfd(adev); @@ -656,9 +775,11 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = { .switch_partition_mode = &aqua_vanjaram_switch_partition_mode, .query_partition_mode = &aqua_vanjaram_query_partition_mode, .get_ip_details = 
&aqua_vanjaram_get_xcp_ip_details, + .get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info, .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id, .select_scheds = &aqua_vanjaram_select_scheds, - .update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list + .update_partition_sched_list = + &aqua_vanjaram_update_partition_sched_list }; static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) @@ -673,6 +794,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) if (ret) return ret; + __aqua_vanjaram_update_supported_modes(adev->xcp_mgr); /* TODO: Default memory node affinity init */ return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index cf1d5d462b67..e2cb1f080e88 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1985,9 +1985,9 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .query_video_codecs = &cik_query_video_codecs, }; -static int cik_common_early_init(void *handle) +static int cik_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->smc_rreg = &cik_smc_rreg; adev->smc_wreg = &cik_smc_wreg; @@ -2124,19 +2124,9 @@ static int cik_common_early_init(void *handle) return 0; } -static int cik_common_sw_init(void *handle) +static int cik_common_hw_init(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static int cik_common_sw_fini(void *handle) -{ - return 0; -} - -static int cik_common_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* move the golden regs per IP block */ cik_init_golden_registers(adev); @@ -2148,23 +2138,14 @@ static int cik_common_hw_init(void *handle) return 0; } -static int cik_common_hw_fini(void *handle) +static int cik_common_hw_fini(struct amdgpu_ip_block *ip_block) { return 0; } -static int cik_common_suspend(void *handle) +static int cik_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_common_hw_fini(adev); -} - -static int cik_common_resume(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_common_hw_init(adev); + return cik_common_hw_init(ip_block); } static bool cik_common_is_idle(void *handle) @@ -2172,12 +2153,9 @@ static bool cik_common_is_idle(void *handle) return true; } -static int cik_common_wait_for_idle(void *handle) -{ - return 0; -} -static int cik_common_soft_reset(void *handle) + +static int cik_common_soft_reset(struct amdgpu_ip_block *ip_block) { /* XXX hard reset?? 
*/ return 0; @@ -2198,20 +2176,13 @@ static int cik_common_set_powergating_state(void *handle, static const struct amd_ip_funcs cik_common_ip_funcs = { .name = "cik_common", .early_init = cik_common_early_init, - .late_init = NULL, - .sw_init = cik_common_sw_init, - .sw_fini = cik_common_sw_fini, .hw_init = cik_common_hw_init, .hw_fini = cik_common_hw_fini, - .suspend = cik_common_suspend, .resume = cik_common_resume, .is_idle = cik_common_is_idle, - .wait_for_idle = cik_common_wait_for_idle, .soft_reset = cik_common_soft_reset, .set_clockgating_state = cik_common_set_clockgating_state, .set_powergating_state = cik_common_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ip_block_version cik_common_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 576baa9dbb0e..1da17755ad53 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -283,9 +283,9 @@ static void cik_ih_set_rptr(struct amdgpu_device *adev, WREG32(mmIH_RB_RPTR, ih->rptr); } -static int cik_ih_early_init(void *handle) +static int cik_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -297,10 +297,10 @@ static int cik_ih_early_init(void *handle) return 0; } -static int cik_ih_sw_init(void *handle) +static int cik_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) @@ -311,9 +311,9 @@ static int cik_ih_sw_init(void *handle) return r; } -static int cik_ih_sw_fini(void *handle) +static int cik_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); @@ -321,34 +321,28 @@ static int cik_ih_sw_fini(void *handle) return 0; } -static int cik_ih_hw_init(void *handle) +static int cik_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return cik_ih_irq_init(adev); } -static int cik_ih_hw_fini(void *handle) +static int cik_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - cik_ih_irq_disable(adev); + cik_ih_irq_disable(ip_block->adev); return 0; } -static int cik_ih_suspend(void *handle) +static int cik_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_ih_hw_fini(adev); + return cik_ih_hw_fini(ip_block); } -static int cik_ih_resume(void *handle) +static int cik_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_ih_hw_init(adev); + return cik_ih_hw_init(ip_block); } static bool cik_ih_is_idle(void *handle) @@ -362,11 +356,11 @@ static bool cik_ih_is_idle(void *handle) return true; } -static int cik_ih_wait_for_idle(void *handle) +static int cik_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -378,9 
+372,9 @@ static int cik_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int cik_ih_soft_reset(void *handle) +static int cik_ih_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -423,7 +417,6 @@ static int cik_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs cik_ih_ip_funcs = { .name = "cik_ih", .early_init = cik_ih_early_init, - .late_init = NULL, .sw_init = cik_ih_sw_init, .sw_fini = cik_ih_sw_fini, .hw_init = cik_ih_hw_init, @@ -435,8 +428,6 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = { .soft_reset = cik_ih_soft_reset, .set_clockgating_state = cik_ih_set_clockgating_state, .set_powergating_state = cik_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs cik_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 952737de9411..ede1a028d48d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -54,7 +54,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev); static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev); static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev); static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev); -static int cik_sdma_soft_reset(void *handle); +static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block); MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin"); MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin"); @@ -918,9 +918,9 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev, } } -static int cik_sdma_early_init(void *handle) +static int cik_sdma_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->sdma.num_instances = SDMA_MAX_INSTANCE; @@ -937,10 +937,10 @@ static int cik_sdma_early_init(void *handle) return 0; } -static int cik_sdma_sw_init(void *handle) +static int cik_sdma_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r, i; /* SDMA trap event */ @@ -977,9 +977,9 @@ static int cik_sdma_sw_init(void *handle) return r; } -static int cik_sdma_sw_fini(void *handle) +static int cik_sdma_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -989,10 +989,10 @@ static int cik_sdma_sw_fini(void *handle) return 0; } -static int cik_sdma_hw_init(void *handle) +static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = cik_sdma_start(adev); if (r) @@ -1001,9 +1001,9 @@ static int cik_sdma_hw_init(void *handle) return r; } -static int cik_sdma_hw_fini(void *handle) +static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cik_ctx_switch_enable(adev, false); cik_sdma_enable(adev, false); @@ -1011,20 +1011,16 @@ static int cik_sdma_hw_fini(void *handle) return 0; } -static int cik_sdma_suspend(void *handle) +static int 
cik_sdma_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_sdma_hw_fini(adev); + return cik_sdma_hw_fini(ip_block); } -static int cik_sdma_resume(void *handle) +static int cik_sdma_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - cik_sdma_soft_reset(handle); + cik_sdma_soft_reset(ip_block); - return cik_sdma_hw_init(adev); + return cik_sdma_hw_init(ip_block); } static bool cik_sdma_is_idle(void *handle) @@ -1039,11 +1035,11 @@ static bool cik_sdma_is_idle(void *handle) return true; } -static int cik_sdma_wait_for_idle(void *handle) +static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -1056,10 +1052,10 @@ static int cik_sdma_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int cik_sdma_soft_reset(void *handle) +static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp; /* sdma0 */ @@ -1217,7 +1213,6 @@ static int cik_sdma_set_powergating_state(void *handle, static const struct amd_ip_funcs cik_sdma_ip_funcs = { .name = "cik_sdma", .early_init = cik_sdma_early_init, - .late_init = NULL, .sw_init = cik_sdma_sw_init, .sw_fini = cik_sdma_sw_fini, .hw_init = cik_sdma_hw_init, @@ -1229,8 +1224,6 @@ static const struct amd_ip_funcs cik_sdma_ip_funcs = { .soft_reset = cik_sdma_soft_reset, .set_clockgating_state = cik_sdma_set_clockgating_state, .set_powergating_state = cik_sdma_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 072643787384..d72973bd570d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -274,9 +274,9 @@ static void cz_ih_set_rptr(struct amdgpu_device *adev, WREG32(mmIH_RB_RPTR, ih->rptr); } -static int cz_ih_early_init(void *handle) +static int cz_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -288,10 +288,10 @@ static int cz_ih_early_init(void *handle) return 0; } -static int cz_ih_sw_init(void *handle) +static int cz_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) @@ -302,9 +302,9 @@ static int cz_ih_sw_init(void *handle) return r; } -static int cz_ih_sw_fini(void *handle) +static int cz_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); @@ -312,10 +312,10 @@ static int cz_ih_sw_fini(void *handle) return 0; } -static int cz_ih_hw_init(void *handle) +static int cz_ih_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = 
ip_block->adev; r = cz_ih_irq_init(adev); if (r) @@ -324,27 +324,21 @@ static int cz_ih_hw_init(void *handle) return 0; } -static int cz_ih_hw_fini(void *handle) +static int cz_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - cz_ih_irq_disable(adev); + cz_ih_irq_disable(ip_block->adev); return 0; } -static int cz_ih_suspend(void *handle) +static int cz_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cz_ih_hw_fini(adev); + return cz_ih_hw_fini(ip_block); } -static int cz_ih_resume(void *handle) +static int cz_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cz_ih_hw_init(adev); + return cz_ih_hw_init(ip_block); } static bool cz_ih_is_idle(void *handle) @@ -358,11 +352,11 @@ static bool cz_ih_is_idle(void *handle) return true; } -static int cz_ih_wait_for_idle(void *handle) +static int cz_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -374,10 +368,10 @@ static int cz_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int cz_ih_soft_reset(void *handle) +static int cz_ih_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__IH_BUSY_MASK) @@ -421,7 +415,6 @@ static int cz_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs cz_ih_ip_funcs = { .name = "cz_ih", .early_init = cz_ih_early_init, - .late_init = NULL, .sw_init = cz_ih_sw_init, .sw_fini = cz_ih_sw_fini, .hw_init = cz_ih_hw_init, @@ -433,8 +426,6 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = { .soft_reset = cz_ih_soft_reset, .set_clockgating_state = cz_ih_set_clockgating_state, .set_powergating_state = cz_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs cz_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 70c1399f738d..5098c50d54c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2738,9 +2738,9 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v10_0_early_init(void *handle) +static int dce_v10_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg; @@ -2765,10 +2765,10 @@ static int dce_v10_0_early_init(void *handle) return 0; } -static int dce_v10_0_sw_init(void *handle) +static int dce_v10_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); @@ -2844,9 +2844,9 @@ static int dce_v10_0_sw_init(void *handle) return 0; } -static int dce_v10_0_sw_fini(void *handle) +static int dce_v10_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; drm_edid_free(adev->mode_info.bios_hardcoded_edid); @@ -2862,10 +2862,10 @@ static int dce_v10_0_sw_fini(void *handle) return 0; } -static int dce_v10_0_hw_init(void *handle) +static int dce_v10_0_hw_init(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v10_0_init_golden_registers(adev); @@ -2887,10 +2887,10 @@ static int dce_v10_0_hw_init(void *handle) return 0; } -static int dce_v10_0_hw_fini(void *handle) +static int dce_v10_0_hw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v10_0_hpd_fini(adev); @@ -2905,9 +2905,9 @@ static int dce_v10_0_hw_fini(void *handle) return 0; } -static int dce_v10_0_suspend(void *handle) +static int dce_v10_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_display_suspend_helper(adev); @@ -2917,18 +2917,18 @@ static int dce_v10_0_suspend(void *handle) adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - return dce_v10_0_hw_fini(handle); + return dce_v10_0_hw_fini(ip_block); } -static int dce_v10_0_resume(void *handle) +static int dce_v10_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, adev->mode_info.bl_level); - ret = dce_v10_0_hw_init(handle); + ret = dce_v10_0_hw_init(ip_block); /* turn on the BL */ if (adev->mode_info.bl_encoder) { @@ -2948,22 +2948,17 @@ static bool dce_v10_0_is_idle(void *handle) return true; } -static int dce_v10_0_wait_for_idle(void *handle) +static bool dce_v10_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static bool dce_v10_0_check_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return dce_v10_0_is_display_hung(adev); } -static int dce_v10_0_soft_reset(void *handle) +static int dce_v10_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0, tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (dce_v10_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; @@ -3322,7 +3317,6 @@ static int dce_v10_0_set_powergating_state(void *handle, static const struct amd_ip_funcs dce_v10_0_ip_funcs = { .name = "dce_v10_0", .early_init = dce_v10_0_early_init, - .late_init = NULL, .sw_init = dce_v10_0_sw_init, .sw_fini = dce_v10_0_sw_fini, .hw_init = dce_v10_0_hw_init, @@ -3330,13 +3324,10 @@ static const struct amd_ip_funcs dce_v10_0_ip_funcs = { .suspend = dce_v10_0_suspend, .resume = dce_v10_0_resume, .is_idle = dce_v10_0_is_idle, - .wait_for_idle = dce_v10_0_wait_for_idle, .check_soft_reset = dce_v10_0_check_soft_reset, .soft_reset = dce_v10_0_soft_reset, .set_clockgating_state = dce_v10_0_set_clockgating_state, .set_powergating_state = dce_v10_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index f154c24499c8..c5680ff4ab9f 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2851,9 +2851,9 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v11_0_early_init(void *handle) +static int dce_v11_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg; @@ -2891,10 +2891,10 @@ static int dce_v11_0_early_init(void *handle) return 0; } -static int dce_v11_0_sw_init(void *handle) +static int dce_v11_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); @@ -2971,9 +2971,9 @@ static int dce_v11_0_sw_init(void *handle) return 0; } -static int dce_v11_0_sw_fini(void *handle) +static int dce_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; drm_edid_free(adev->mode_info.bios_hardcoded_edid); @@ -2989,10 +2989,10 @@ static int dce_v11_0_sw_fini(void *handle) return 0; } -static int dce_v11_0_hw_init(void *handle) +static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v11_0_init_golden_registers(adev); @@ -3025,10 +3025,10 @@ static int dce_v11_0_hw_init(void *handle) return 0; } -static int dce_v11_0_hw_fini(void *handle) +static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v11_0_hpd_fini(adev); @@ -3043,9 +3043,9 @@ static int dce_v11_0_hw_fini(void *handle) return 0; } -static int dce_v11_0_suspend(void *handle) +static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_display_suspend_helper(adev); @@ -3055,18 +3055,18 @@ static int dce_v11_0_suspend(void *handle) adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - return dce_v11_0_hw_fini(handle); + return dce_v11_0_hw_fini(ip_block); } -static int dce_v11_0_resume(void *handle) +static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, adev->mode_info.bl_level); - ret = dce_v11_0_hw_init(handle); + ret = dce_v11_0_hw_init(ip_block); /* turn on the BL */ if (adev->mode_info.bl_encoder) { @@ -3086,15 +3086,10 @@ static bool dce_v11_0_is_idle(void *handle) return true; } -static int dce_v11_0_wait_for_idle(void *handle) -{ - return 0; -} - -static int dce_v11_0_soft_reset(void *handle) +static int dce_v11_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0, tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (dce_v11_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; @@ -3454,7 +3449,6 @@ static int 
dce_v11_0_set_powergating_state(void *handle, static const struct amd_ip_funcs dce_v11_0_ip_funcs = { .name = "dce_v11_0", .early_init = dce_v11_0_early_init, - .late_init = NULL, .sw_init = dce_v11_0_sw_init, .sw_fini = dce_v11_0_sw_fini, .hw_init = dce_v11_0_hw_init, @@ -3462,12 +3456,9 @@ static const struct amd_ip_funcs dce_v11_0_ip_funcs = { .suspend = dce_v11_0_suspend, .resume = dce_v11_0_resume, .is_idle = dce_v11_0_is_idle, - .wait_for_idle = dce_v11_0_wait_for_idle, .soft_reset = dce_v11_0_soft_reset, .set_clockgating_state = dce_v11_0_set_clockgating_state, .set_powergating_state = dce_v11_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index a7fcb135827f..eb7de9122d99 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2633,9 +2633,9 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v6_0_early_init(void *handle) +static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg; @@ -2664,11 +2664,11 @@ static int dce_v6_0_early_init(void *handle) return 0; } -static int dce_v6_0_sw_init(void *handle) +static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; bool ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); @@ -2743,9 +2743,9 @@ static int dce_v6_0_sw_init(void *handle) return r; } -static int dce_v6_0_sw_fini(void *handle) +static int dce_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; drm_edid_free(adev->mode_info.bios_hardcoded_edid); @@ -2760,10 +2760,10 @@ static int dce_v6_0_sw_fini(void *handle) return 0; } -static int dce_v6_0_hw_init(void *handle) +static int dce_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* disable vga render */ dce_v6_0_set_vga_render_state(adev, false); @@ -2783,10 +2783,10 @@ static int dce_v6_0_hw_init(void *handle) return 0; } -static int dce_v6_0_hw_fini(void *handle) +static int dce_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v6_0_hpd_fini(adev); @@ -2801,9 +2801,9 @@ static int dce_v6_0_hw_fini(void *handle) return 0; } -static int dce_v6_0_suspend(void *handle) +static int dce_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_display_suspend_helper(adev); @@ -2812,18 +2812,18 @@ static int dce_v6_0_suspend(void *handle) adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - return dce_v6_0_hw_fini(handle); + return dce_v6_0_hw_fini(ip_block); } -static int dce_v6_0_resume(void *handle) +static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = 
(struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, adev->mode_info.bl_level); - ret = dce_v6_0_hw_init(handle); + ret = dce_v6_0_hw_init(ip_block); /* turn on the BL */ if (adev->mode_info.bl_encoder) { @@ -2843,12 +2843,7 @@ static bool dce_v6_0_is_idle(void *handle) return true; } -static int dce_v6_0_wait_for_idle(void *handle) -{ - return 0; -} - -static int dce_v6_0_soft_reset(void *handle) +static int dce_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n"); return 0; @@ -3144,7 +3139,6 @@ static int dce_v6_0_set_powergating_state(void *handle, static const struct amd_ip_funcs dce_v6_0_ip_funcs = { .name = "dce_v6_0", .early_init = dce_v6_0_early_init, - .late_init = NULL, .sw_init = dce_v6_0_sw_init, .sw_fini = dce_v6_0_sw_fini, .hw_init = dce_v6_0_hw_init, @@ -3152,12 +3146,9 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = { .suspend = dce_v6_0_suspend, .resume = dce_v6_0_resume, .is_idle = dce_v6_0_is_idle, - .wait_for_idle = dce_v6_0_wait_for_idle, .soft_reset = dce_v6_0_soft_reset, .set_clockgating_state = dce_v6_0_set_clockgating_state, .set_powergating_state = dce_v6_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 77ac3f114d24..04b79ff87f75 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2644,9 +2644,9 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v8_0_early_init(void *handle) +static int dce_v8_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg; @@ -2680,10 +2680,10 @@ static int dce_v8_0_early_init(void *handle) return 0; } -static int dce_v8_0_sw_init(void *handle) +static int dce_v8_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); @@ -2764,9 +2764,9 @@ static int dce_v8_0_sw_init(void *handle) return 0; } -static int dce_v8_0_sw_fini(void *handle) +static int dce_v8_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; drm_edid_free(adev->mode_info.bios_hardcoded_edid); @@ -2782,10 +2782,10 @@ static int dce_v8_0_sw_fini(void *handle) return 0; } -static int dce_v8_0_hw_init(void *handle) +static int dce_v8_0_hw_init(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* disable vga render */ dce_v8_0_set_vga_render_state(adev, false); @@ -2805,10 +2805,10 @@ static int dce_v8_0_hw_init(void *handle) return 0; } -static int dce_v8_0_hw_fini(void *handle) +static int dce_v8_0_hw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v8_0_hpd_fini(adev); @@ -2823,9 +2823,9 @@ static int dce_v8_0_hw_fini(void *handle) return 0; 
} -static int dce_v8_0_suspend(void *handle) +static int dce_v8_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_display_suspend_helper(adev); @@ -2835,18 +2835,18 @@ static int dce_v8_0_suspend(void *handle) adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - return dce_v8_0_hw_fini(handle); + return dce_v8_0_hw_fini(ip_block); } -static int dce_v8_0_resume(void *handle) +static int dce_v8_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, adev->mode_info.bl_level); - ret = dce_v8_0_hw_init(handle); + ret = dce_v8_0_hw_init(ip_block); /* turn on the BL */ if (adev->mode_info.bl_encoder) { @@ -2866,15 +2866,10 @@ static bool dce_v8_0_is_idle(void *handle) return true; } -static int dce_v8_0_wait_for_idle(void *handle) -{ - return 0; -} - -static int dce_v8_0_soft_reset(void *handle) +static int dce_v8_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0, tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (dce_v8_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; @@ -3232,7 +3227,6 @@ static int dce_v8_0_set_powergating_state(void *handle, static const struct amd_ip_funcs dce_v8_0_ip_funcs = { .name = "dce_v8_0", .early_init = dce_v8_0_early_init, - .late_init = NULL, .sw_init = dce_v8_0_sw_init, .sw_fini = dce_v8_0_sw_fini, .hw_init = dce_v8_0_hw_init, @@ -3240,12 +3234,9 @@ static const struct amd_ip_funcs dce_v8_0_ip_funcs = { .suspend = dce_v8_0_suspend, .resume = dce_v8_0_resume, .is_idle = dce_v8_0_is_idle, - .wait_for_idle = dce_v8_0_wait_for_idle, .soft_reset = dce_v8_0_soft_reset, .set_clockgating_state = dce_v8_0_set_clockgating_state, .set_powergating_state = dce_v8_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index 483a441b46aa..621aeca53880 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -254,8 +254,8 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev) static void df_v3_6_sw_fini(struct amdgpu_device *adev) { - - device_remove_file(adev->dev, &dev_attr_df_cntr_avail); + if (adev->dev->kobj.sd) + device_remove_file(adev->dev, &dev_attr_df_cntr_avail); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 45ed97038df0..24dce803a829 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3677,13 +3677,19 @@ static int gfx_v10_0_set_powergating_state(void *handle, enum amd_powergating_state state); static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) { + struct amdgpu_device *adev = kiq_ring->adev; + u64 shader_mc_addr; + + /* Cleaner shader MC address */ + shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8; + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ amdgpu_ring_write(kiq_ring, 
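	/* the two SET_RESOURCES dwords that used to carry the GWS mask are
	 * repurposed below to pass the cleaner shader MC address to the KIQ */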
upper_32_bits(queue_mask)); /* queue mask hi */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ + amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */ + amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */ amdgpu_ring_write(kiq_ring, 0); /* oac mask */ amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ } @@ -4683,11 +4689,11 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v10_0_sw_init(void *handle) +static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id = 0; int xcc_id = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): @@ -4726,6 +4732,11 @@ static int gfx_v10_0_sw_init(void *handle) adev->gfx.mec.num_queue_per_pipe = 8; break; } + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + default: + adev->gfx.enable_cleaner_shader = false; + break; + } /* KIQ event */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, @@ -4814,6 +4825,11 @@ static int gfx_v10_0_sw_init(void *handle) } } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0); if (r) { @@ -4842,6 +4858,10 @@ static int gfx_v10_0_sw_init(void *handle) gfx_v10_0_alloc_ip_dump(adev); + r = amdgpu_gfx_sysfs_init(adev); + if (r) + return r; + return 0; } @@ -4866,10 +4886,10 @@ static void gfx_v10_0_me_fini(struct amdgpu_device *adev) (void **)&adev->gfx.me.me_fw_ptr); } -static int gfx_v10_0_sw_fini(void *handle) +static int gfx_v10_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -4881,6 +4901,8 @@ static int gfx_v10_0_sw_fini(void *handle) amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); amdgpu_gfx_kiq_fini(adev, 0); + amdgpu_gfx_cleaner_shader_sw_fini(adev); + gfx_v10_0_pfp_fini(adev); gfx_v10_0_ce_fini(adev); gfx_v10_0_me_fini(adev); @@ -4891,6 +4913,7 @@ static int gfx_v10_0_sw_fini(void *handle) gfx_v10_0_rlc_backdoor_autoload_buffer_fini(adev); gfx_v10_0_free_microcode(adev); + amdgpu_gfx_sysfs_fini(adev); kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); @@ -6374,7 +6397,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -6412,7 +6435,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) ring->wptr = 0; WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); - /* Set the wb address wether it's enabled or not */ + /* Set the wb address whether it's enabled or not */ rptr_addr = 
ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -7366,14 +7389,17 @@ static void gfx_v10_0_disable_gpa_mode(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCPG_PSP_DEBUG, data); } -static int gfx_v10_0_hw_init(void *handle) +static int gfx_v10_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!amdgpu_emu_mode) gfx_v10_0_init_golden_registers(adev); + amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, + adev->gfx.cleaner_shader_ptr); + if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { /** * For gfx 10, rlc firmware loading relies on smu firmware is @@ -7418,9 +7444,9 @@ static int gfx_v10_0_hw_init(void *handle) return r; } -static int gfx_v10_0_hw_fini(void *handle) +static int gfx_v10_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -7431,7 +7457,7 @@ static int gfx_v10_0_hw_fini(void *handle) * otherwise the gfxoff disallowing will be failed to set. */ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1)) - gfx_v10_0_set_powergating_state(handle, AMD_PG_STATE_UNGATE); + gfx_v10_0_set_powergating_state(ip_block->adev, AMD_PG_STATE_UNGATE); if (!adev->no_hw_access) { if (amdgpu_async_gfx_ring) { @@ -7456,14 +7482,14 @@ static int gfx_v10_0_hw_fini(void *handle) return 0; } -static int gfx_v10_0_suspend(void *handle) +static int gfx_v10_0_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v10_0_hw_fini(handle); + return gfx_v10_0_hw_fini(ip_block); } -static int gfx_v10_0_resume(void *handle) +static int gfx_v10_0_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v10_0_hw_init(handle); + return gfx_v10_0_hw_init(ip_block); } static bool gfx_v10_0_is_idle(void *handle) @@ -7477,11 +7503,11 @@ static bool gfx_v10_0_is_idle(void *handle) return true; } -static int gfx_v10_0_wait_for_idle(void *handle) +static int gfx_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned int i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -7495,11 +7521,11 @@ static int gfx_v10_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int gfx_v10_0_soft_reset(void *handle) +static int gfx_v10_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* GRBM_STATUS */ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS); @@ -7678,9 +7704,9 @@ static void gfx_v10_0_ring_emit_gds_switch(struct amdgpu_ring *ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } -static int gfx_v10_0_early_init(void *handle) +static int gfx_v10_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.funcs = &gfx_v10_0_gfx_funcs; @@ -7722,9 +7748,9 @@ static int gfx_v10_0_early_init(void *handle) return gfx_v10_0_init_microcode(adev); } -static int gfx_v10_0_late_init(void *handle) +static int gfx_v10_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device 
*adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -9402,8 +9428,6 @@ static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring) static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -9414,8 +9438,7 @@ static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) @@ -9568,9 +9591,9 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, return amdgpu_ring_test_ring(ring); } -static void gfx_v10_ip_print(void *handle, struct drm_printer *p) +static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1); @@ -9632,9 +9655,9 @@ static void gfx_v10_ip_print(void *handle, struct drm_printer *p) } } -static void gfx_v10_ip_dump(void *handle) +static void gfx_v10_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1); @@ -9699,6 +9722,13 @@ static void gfx_v10_ip_dump(void *handle) amdgpu_gfx_off_ctrl(adev, true); } +static void gfx_v10_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) +{ + /* Emit the cleaner shader */ + amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); + amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ +} + static const struct amd_ip_funcs gfx_v10_0_ip_funcs = { .name = "gfx_v10_0", .early_init = gfx_v10_0_early_init, @@ -9749,7 +9779,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { 5 + /* HDP_INVL */ 8 + 8 + /* FENCE x2 */ 2 + /* SWITCH_BUFFER */ - 8, /* gfx_v10_0_emit_mem_sync */ + 8 + /* gfx_v10_0_emit_mem_sync */ + 2, /* gfx_v10_0_ring_emit_cleaner_shader */ .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */ .emit_ib = gfx_v10_0_ring_emit_ib_gfx, .emit_fence = gfx_v10_0_ring_emit_fence, @@ -9772,6 +9803,9 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { .soft_recovery = gfx_v10_0_ring_soft_recovery, .emit_mem_sync = gfx_v10_0_emit_mem_sync, .reset = gfx_v10_0_reset_kgq, + .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { @@ -9791,7 +9825,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v10_0_ring_emit_vm_flush */ 8 + 8 + 8 + /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */ - 8, /* gfx_v10_0_emit_mem_sync */ + 8 + /* gfx_v10_0_emit_mem_sync */ + 2, /* gfx_v10_0_ring_emit_cleaner_shader */ .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */ .emit_ib = gfx_v10_0_ring_emit_ib_compute, .emit_fence = 
gfx_v10_0_ring_emit_fence, @@ -9809,6 +9844,9 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { .soft_recovery = gfx_v10_0_ring_soft_recovery, .emit_mem_sync = gfx_v10_0_emit_mem_sync, .reset = gfx_v10_0_reset_kcq, + .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index d3e8be82a172..2ae058a224f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -46,6 +46,7 @@ #include "clearstate_gfx11.h" #include "v11_structs.h" #include "gfx_v11_0.h" +#include "gfx_v11_0_cleaner_shader.h" #include "gfx_v11_0_3.h" #include "nbio_v4_3.h" #include "mes_v11_0.h" @@ -293,14 +294,20 @@ static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev, static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) { + struct amdgpu_device *adev = kiq_ring->adev; + u64 shader_mc_addr; + + /* Cleaner shader MC address */ + shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8; + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */ PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ + amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */ + amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */ amdgpu_ring_write(kiq_ring, 0); /* oac mask */ amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ } @@ -483,8 +490,6 @@ static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -495,8 +500,7 @@ static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring) @@ -1536,11 +1540,11 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v11_0_sw_init(void *handle) +static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id = 0; int xcc_id = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): @@ -1575,6 +1579,29 @@ static int gfx_v11_0_sw_init(void *handle) break; } + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): + adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; + adev->gfx.cleaner_shader_size = 
sizeof(gfx_11_0_3_cleaner_shader_hex); + if (adev->gfx.me_fw_version >= 2280 && + adev->gfx.pfp_fw_version >= 2370 && + adev->gfx.mec_fw_version >= 2450 && + adev->mes.fw_version[0] >= 99) { + adev->gfx.enable_cleaner_shader = true; + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; + default: + adev->gfx.enable_cleaner_shader = false; + break; + } + /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) @@ -1666,6 +1693,24 @@ static int gfx_v11_0_sw_init(void *handle) } } + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): + if ((adev->gfx.me_fw_version >= 2280) && + (adev->gfx.mec_fw_version >= 2410)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + } + break; + default: + break; + } + if (!adev->enable_mes_kiq) { r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0); if (r) { @@ -1700,6 +1745,10 @@ static int gfx_v11_0_sw_init(void *handle) gfx_v11_0_alloc_ip_dump(adev); + r = amdgpu_gfx_sysfs_init(adev); + if (r) + return r; + return 0; } @@ -1732,10 +1781,10 @@ static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.rlc_autoload_ptr); } -static int gfx_v11_0_sw_fini(void *handle) +static int gfx_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -1749,6 +1798,8 @@ static int gfx_v11_0_sw_fini(void *handle) amdgpu_gfx_kiq_fini(adev, 0); } + amdgpu_gfx_cleaner_shader_sw_fini(adev); + gfx_v11_0_pfp_fini(adev); gfx_v11_0_me_fini(adev); gfx_v11_0_rlc_fini(adev); @@ -1759,6 +1810,8 @@ static int gfx_v11_0_sw_fini(void *handle) gfx_v11_0_free_microcode(adev); + amdgpu_gfx_sysfs_fini(adev); + kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); kfree(adev->gfx.ip_dump_gfx_queues); @@ -1893,8 +1946,10 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev) soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - /* Initialize all compute VMIDs to have no GDS, GWS, or OA - acccess. These should be enabled by FW for target VMIDs. */ + /* + * Initialize all compute VMIDs to have no GDS, GWS, or OA + * access. These should be enabled by FW for target VMIDs. 
+ */ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0); WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0); @@ -3555,7 +3610,7 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -3593,7 +3648,7 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) ring->wptr = 0; WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); - /* Set the wb address wether it's enabled or not */ + /* Set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -4568,10 +4623,13 @@ static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data); } -static int gfx_v11_0_hw_init(void *handle) +static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; + + amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, + adev->gfx.cleaner_shader_ptr); if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { if (adev->gfx.imu.funcs) { @@ -4665,9 +4723,9 @@ static int gfx_v11_0_hw_init(void *handle) return r; } -static int gfx_v11_0_hw_fini(void *handle) +static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -4703,14 +4761,14 @@ static int gfx_v11_0_hw_fini(void *handle) return 0; } -static int gfx_v11_0_suspend(void *handle) +static int gfx_v11_0_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v11_0_hw_fini(handle); + return gfx_v11_0_hw_fini(ip_block); } -static int gfx_v11_0_resume(void *handle) +static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v11_0_hw_init(handle); + return gfx_v11_0_hw_init(ip_block); } static bool gfx_v11_0_is_idle(void *handle) @@ -4724,11 +4782,11 @@ static bool gfx_v11_0_is_idle(void *handle) return true; } -static int gfx_v11_0_wait_for_idle(void *handle) +static int gfx_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -4774,12 +4832,12 @@ int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev, return 0; } -static int gfx_v11_0_soft_reset(void *handle) +static int gfx_v11_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0; u32 tmp; int r, i, j, k; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gfx_rlc_enter_safe_mode(adev, 0); @@ -4905,10 +4963,10 @@ static int gfx_v11_0_soft_reset(void *handle) return 
gfx_v11_0_cp_resume(adev); } -static bool gfx_v11_0_check_soft_reset(void *handle) +static bool gfx_v11_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { int i, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; long tmo = msecs_to_jiffies(1000); @@ -4929,12 +4987,13 @@ static bool gfx_v11_0_check_soft_reset(void *handle) return false; } -static int gfx_v11_0_post_soft_reset(void *handle) +static int gfx_v11_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { + struct amdgpu_device *adev = ip_block->adev; /** * GFX soft reset will impact MES, need resume MES when do GFX soft reset */ - return amdgpu_mes_resume((struct amdgpu_device *)handle); + return amdgpu_mes_resume(adev); } static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) @@ -4995,9 +5054,9 @@ static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } -static int gfx_v11_0_early_init(void *handle) +static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.funcs = &gfx_v11_0_gfx_funcs; @@ -5018,9 +5077,9 @@ static int gfx_v11_0_early_init(void *handle) return gfx_v11_0_init_microcode(adev); } -static int gfx_v11_0_late_init(void *handle) +static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -6639,9 +6698,9 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) return amdgpu_ring_test_ring(ring); } -static void gfx_v11_ip_print(void *handle, struct drm_printer *p) +static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0); @@ -6703,9 +6762,9 @@ static void gfx_v11_ip_print(void *handle, struct drm_printer *p) } } -static void gfx_v11_ip_dump(void *handle) +static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0); @@ -6769,6 +6828,13 @@ static void gfx_v11_ip_dump(void *handle) amdgpu_gfx_off_ctrl(adev, true); } +static void gfx_v11_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) +{ + /* Emit the cleaner shader */ + amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); + amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ +} + static const struct amd_ip_funcs gfx_v11_0_ip_funcs = { .name = "gfx_v11_0", .early_init = gfx_v11_0_early_init, @@ -6818,7 +6884,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { 5 + /* HDP_INVL */ 22 + /* SET_Q_PREEMPTION_MODE */ 8 + 8 + /* FENCE x2 */ - 8, /* gfx_v11_0_emit_mem_sync */ + 8 + /* gfx_v11_0_emit_mem_sync */ + 2, /* gfx_v11_0_ring_emit_cleaner_shader */ .emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */ .emit_ib = gfx_v11_0_ring_emit_ib_gfx, .emit_fence = gfx_v11_0_ring_emit_fence, @@ -6841,6 +6908,9 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { .soft_recovery = gfx_v11_0_ring_soft_recovery, 
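/*
 * Annotation, not part of the patch: a rough sketch of how the three hooks
 * added below are driven. In amdgpu_ring.c, begin_use()/end_use() bracket
 * every submission on a ring, approximately:
 *
 *	r = amdgpu_ring_alloc(ring, ndw);  // invokes ring->funcs->begin_use(ring)
 *	...                                // emit IBs, fences, and, when the
 *	                                   // isolation logic requests it, the
 *	                                   // two-dword RUN_CLEANER_SHADER packet
 *	                                   // via ring->funcs->emit_cleaner_shader(ring)
 *	amdgpu_ring_commit(ring);          // invokes ring->funcs->end_use(ring)
 *
 * which is why the enforce-isolation pair can account for ring usage without
 * touching the individual emit paths.
 */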
 	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
 	.reset = gfx_v11_0_reset_kgq,
+	.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
+	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
 };
 
 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
@@ -6861,7 +6931,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
 		2 + /* gfx_v11_0_ring_emit_vm_flush */
 		8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
-		8, /* gfx_v11_0_emit_mem_sync */
+		8 + /* gfx_v11_0_emit_mem_sync */
+		2, /* gfx_v11_0_ring_emit_cleaner_shader */
 	.emit_ib_size =	7, /* gfx_v11_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v11_0_ring_emit_fence,
@@ -6879,6 +6950,9 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
 	.soft_recovery = gfx_v11_0_ring_soft_recovery,
 	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
 	.reset = gfx_v11_0_reset_kcq,
+	.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
+	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
 };
 
 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm
new file mode 100644
index 000000000000..9b90b66368c7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader is used to clean LDS, SGPRs and VGPRs. It is the first 64 dwords (256 bytes) of the 192-dword cleaner shader.
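// Annotation, not part of the original file: the clears below rely on
// m0-relative register addressing. s_movreld_b32 sN, 0 writes SGPR[N + M0]
// and v_movreld_b32 vN, 0 writes VGPR[N + M0]. The SGPR loop therefore starts
// at m0 = 0x68 (104) and steps down by 4: the first pass clears s104..s107,
// the last pass (m0 = 0) clears s0..s3, i.e. 27 passes x 4 registers = the
// "SGPRs 0 - 107" quoted in the notes below. The VGPR loop uses the same
// pattern with m0 starting at 0x58 and a stride of 8.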
+// To turn this shader program on for compilation, change this to main and lower shader main to main_1
+
+// Navi3 : Clear SGPRs, VGPRs and LDS
+// Launch 32 waves per CU (16 per SIMD) as a workgroup (threadgroup) to fill every wave slot
+// Waves are "wave32" and have 64 VGPRs each, which uses all 1024 VGPRs per SIMD
+// Waves are launched in "CU" mode, and the workgroup shares 64KB of LDS (half of the WGP's LDS)
+// It takes 2 workgroups to use all of LDS: one on each CU of the WGP
+// Each wave clears SGPRs 0 - 107
+// Each wave clears VGPRs 0 - 63
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
+
+shader main
+  asic(GFX11)
+  type(CS)
+  wave_size(32)
+// Note: original source code from SQ team
+
+// Takes about 2500 clocks to run.
+//   (theoretical fastest = 1024clks vgpr + 640lds = 1660 clks)
+//
+  S_BARRIER
+
+  //
+  // CLEAR VGPRs
+  //
+  s_mov_b32     m0, 0x00000058   // Loop 96/8=12 times  (loop unrolled for performance)
+
+label_0005:
+  v_movreld_b32     v0, 0
+  v_movreld_b32     v1, 0
+  v_movreld_b32     v2, 0
+  v_movreld_b32     v3, 0
+  v_movreld_b32     v4, 0
+  v_movreld_b32     v5, 0
+  v_movreld_b32     v6, 0
+  v_movreld_b32     v7, 0
+  s_sub_u32         m0, m0, 8
+  s_cbranch_scc0    label_0005
+  //
+  //
+
+  s_mov_b32     s2, 0x80000000   // Bit31 is first_wave
+  s_and_b32     s2, s2, s0       // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+  s_cbranch_scc0  label_0023     // Clean LDS if it's the first wave of the ThreadGroup/WorkGroup
+  // CLEAR LDS
+  //
+  s_mov_b32 exec_lo, 0xffffffff
+  s_mov_b32 exec_hi, 0xffffffff
+  v_mbcnt_lo_u32_b32  v1, exec_hi, 0    // Set V1 to thread-ID (0..63)
+  v_mbcnt_hi_u32_b32  v1, exec_lo, v1   // Set V1 to thread-ID (0..63)
+  v_mul_u32_u24  v1, 0x00000008, v1     // * 8, so each thread is a double-dword address (8byte)
+  s_mov_b32     s2, 0x00000003f         // 64 loop iterations
+  s_mov_b32     m0, 0xffffffff
+  // Clear all of LDS space
+  // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+  ds_write2_b64  v1, v[2:3], v[2:3] offset1:32
+  ds_write2_b64  v1, v[4:5], v[4:5] offset0:64 offset1:96
+  v_add_co_u32  v1, vcc, 0x00000400, v1
+  s_sub_u32     s2, s2, 1
+  s_cbranch_scc0  label_001F
+  //
+  // CLEAR SGPRs
+  //
+label_0023:
+  s_mov_b32     m0, 0x00000068   // Loop 108/4=27 times  (loop unrolled for performance)
+label_sgpr_loop:
+  s_movreld_b32     s0, 0
+  s_movreld_b32     s1, 0
+  s_movreld_b32     s2, 0
+  s_movreld_b32     s3, 0
+  s_sub_u32         m0, m0, 4
+  s_cbranch_scc0  label_sgpr_loop
+
+  //clear vcc
+  s_mov_b64 vcc, 0               //clear vcc
+  s_mov_b32 flat_scratch_lo, 0   //clear flat scratch lo SGPR
+  s_mov_b32 flat_scratch_hi, 0   //clear flat scratch hi SGPR
+  s_mov_b64 ttmp0, 0             //Clear ttmp0 and ttmp1
+  s_mov_b64 ttmp2, 0             //Clear ttmp2 and ttmp3
+  s_mov_b64 ttmp4, 0             //Clear ttmp4 and ttmp5
+  s_mov_b64 ttmp6, 0             //Clear ttmp6 and ttmp7
+  s_mov_b64 ttmp8, 0             //Clear ttmp8 and ttmp9
+  s_mov_b64 ttmp10, 0            //Clear ttmp10 and ttmp11
+  s_mov_b64 ttmp12, 0            //Clear ttmp12 and ttmp13
+  s_mov_b64 ttmp14, 0            //Clear ttmp14 and ttmp15
+
+  s_endpgm
+
+end
+
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h
new file mode 100644
index 000000000000..3218cc04f543
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* Define the cleaner shader gfx_11_0_3 */ +static const u32 gfx_11_0_3_cleaner_shader_hex[] = { + 0xb0804006, 0xbe8200ff, + 0x00000058, 0xbefd0080, + 0x7e008480, 0x7e028480, + 0x7e048480, 0x7e068480, + 0x7e088480, 0x7e0a8480, + 0x7e0c8480, 0x7e0e8480, + 0xbefd0002, 0x80828802, + 0xbfa1fff5, 0xbe8200ff, + 0x80000000, 0x8b020002, + 0xbfa10012, 0xbefe00c1, + 0xbeff00c1, 0xd71f0001, + 0x0001007f, 0xd7200001, + 0x0002027e, 0x16020288, + 0xbe8200bf, 0xbefd00c1, + 0xd9382000, 0x00020201, + 0xd9386040, 0x00040401, + 0xd7006a01, 0x000202ff, + 0x00000400, 0x80828102, + 0xbfa1fff7, 0xbefd00ff, + 0x00000068, 0xbe804280, + 0xbe814280, 0xbe824280, + 0xbe834280, 0x80fd847d, + 0xbfa1fffa, 0xbeea0180, + 0xbeec0180, 0xbeee0180, + 0xbef00180, 0xbef20180, + 0xbef40180, 0xbef60180, + 0xbef80180, 0xbefa0180, + 0xbfb00000, 0xbf9f0000, + 0xbf9f0000, 0xbf9f0000, + 0xbf9f0000, 0xbf9f0000, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 47b47d21f464..fe7c48f2fb2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -1319,12 +1319,12 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v12_0_sw_init(void *handle) +static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id = 0; unsigned num_compute_rings; int xcc_id = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(12, 0, 0): @@ -1346,6 +1346,12 @@ static int gfx_v12_0_sw_init(void *handle) break; } + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + default: + adev->gfx.enable_cleaner_shader = false; + break; + } + /* recalculate compute rings to use based on hardware configuration */ num_compute_rings = (adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe) / 2; @@ -1431,6 +1437,12 @@ static int gfx_v12_0_sw_init(void *handle) } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + if (!adev->enable_mes_kiq) { r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0); if (r) { @@ -1460,6 +1472,10 @@ static int gfx_v12_0_sw_init(void *handle) gfx_v12_0_alloc_ip_dump(adev); + r = 
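/*
 * Annotation, not part of the patch: amdgpu_gfx_sysfs_init() is the
 * consolidated sysfs entry point here; the gfx_v9_0 and gfx_v9_4_3 hunks
 * further below drop their separate amdgpu_gfx_sysfs_isolation_shader_init()
 * and _fini() calls in its favor, so a single init/fini pair now covers the
 * sysfs attributes for both the supported-reset masks and the
 * isolation/cleaner-shader controls.
 */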
amdgpu_gfx_sysfs_init(adev); + if (r) + return r; + return 0; } @@ -1492,10 +1508,10 @@ static void gfx_v12_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.rlc_autoload_ptr); } -static int gfx_v12_0_sw_fini(void *handle) +static int gfx_v12_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -1519,6 +1535,8 @@ static int gfx_v12_0_sw_fini(void *handle) gfx_v12_0_free_microcode(adev); + amdgpu_gfx_sysfs_fini(adev); + kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); kfree(adev->gfx.ip_dump_gfx_queues); @@ -2601,7 +2619,7 @@ static int gfx_v12_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -3513,10 +3531,10 @@ static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev) } } -static int gfx_v12_0_hw_init(void *handle) +static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { @@ -3603,9 +3621,9 @@ static int gfx_v12_0_hw_init(void *handle) return r; } -static int gfx_v12_0_hw_fini(void *handle) +static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t tmp; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); @@ -3643,14 +3661,14 @@ static int gfx_v12_0_hw_fini(void *handle) return 0; } -static int gfx_v12_0_suspend(void *handle) +static int gfx_v12_0_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v12_0_hw_fini(handle); + return gfx_v12_0_hw_fini(ip_block); } -static int gfx_v12_0_resume(void *handle) +static int gfx_v12_0_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v12_0_hw_init(handle); + return gfx_v12_0_hw_init(ip_block); } static bool gfx_v12_0_is_idle(void *handle) @@ -3664,11 +3682,11 @@ static bool gfx_v12_0_is_idle(void *handle) return true; } -static int gfx_v12_0_wait_for_idle(void *handle) +static int gfx_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -3695,9 +3713,9 @@ static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev) return clock; } -static int gfx_v12_0_early_init(void *handle) +static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.funcs = &gfx_v12_0_gfx_funcs; @@ -3717,9 +3735,9 @@ static int gfx_v12_0_early_init(void *handle) return gfx_v12_0_init_microcode(adev); } -static int gfx_v12_0_late_init(void *handle) +static int gfx_v12_0_late_init(struct amdgpu_ip_block 
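/* note, not part of the patch: this is the series-wide callback conversion -
 * amd_ip_funcs hooks now take the struct amdgpu_ip_block itself and read the
 * device via ip_block->adev instead of casting a void *handle */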
*ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -5022,8 +5040,6 @@ static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring) static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -5034,13 +5050,19 @@ static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } -static void gfx_v12_ip_print(void *handle, struct drm_printer *p) +static void gfx_v12_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* Emit the cleaner shader */ + amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); + amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ +} + +static void gfx_v12_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) +{ + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0); @@ -5102,9 +5124,9 @@ static void gfx_v12_ip_print(void *handle, struct drm_printer *p) } } -static void gfx_v12_ip_dump(void *handle) +static void gfx_v12_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0); @@ -5297,7 +5319,8 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { 3 + /* CNTX_CTRL */ 5 + /* HDP_INVL */ 8 + 8 + /* FENCE x2 */ - 8, /* gfx_v12_0_emit_mem_sync */ + 8 + /* gfx_v12_0_emit_mem_sync */ + 2, /* gfx_v12_0_ring_emit_cleaner_shader */ .emit_ib_size = 4, /* gfx_v12_0_ring_emit_ib_gfx */ .emit_ib = gfx_v12_0_ring_emit_ib_gfx, .emit_fence = gfx_v12_0_ring_emit_fence, @@ -5318,6 +5341,9 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { .soft_recovery = gfx_v12_0_ring_soft_recovery, .emit_mem_sync = gfx_v12_0_emit_mem_sync, .reset = gfx_v12_0_reset_kgq, + .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { @@ -5336,7 +5362,8 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v12_0_ring_emit_vm_flush */ 8 + 8 + 8 + /* gfx_v12_0_ring_emit_fence x3 for user fence, vm fence */ - 8, /* gfx_v12_0_emit_mem_sync */ + 8 + /* gfx_v12_0_emit_mem_sync */ + 2, /* gfx_v12_0_ring_emit_cleaner_shader */ .emit_ib_size = 7, /* gfx_v12_0_ring_emit_ib_compute */ .emit_ib = gfx_v12_0_ring_emit_ib_compute, .emit_fence = gfx_v12_0_ring_emit_fence, @@ -5353,6 +5380,9 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { .soft_recovery = gfx_v12_0_ring_soft_recovery, .emit_mem_sync = gfx_v12_0_emit_mem_sync, .reset = gfx_v12_0_reset_kcq, + .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = 
amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 564f0b9336b6..41f50bf380c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -3023,9 +3023,9 @@ static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = { .start = gfx_v6_0_rlc_start }; -static int gfx_v6_0_early_init(void *handle) +static int gfx_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.xcc_mask = 1; adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS; @@ -3039,10 +3039,10 @@ static int gfx_v6_0_early_init(void *handle) return 0; } -static int gfx_v6_0_sw_init(void *handle) +static int gfx_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r; r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq); @@ -3107,10 +3107,10 @@ static int gfx_v6_0_sw_init(void *handle) return r; } -static int gfx_v6_0_sw_fini(void *handle) +static int gfx_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -3122,10 +3122,10 @@ static int gfx_v6_0_sw_fini(void *handle) return 0; } -static int gfx_v6_0_hw_init(void *handle) +static int gfx_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gfx_v6_0_constants_init(adev); @@ -3142,9 +3142,9 @@ static int gfx_v6_0_hw_init(void *handle) return r; } -static int gfx_v6_0_hw_fini(void *handle) +static int gfx_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gfx_v6_0_cp_enable(adev, false); adev->gfx.rlc.funcs->stop(adev); @@ -3153,18 +3153,14 @@ static int gfx_v6_0_hw_fini(void *handle) return 0; } -static int gfx_v6_0_suspend(void *handle) +static int gfx_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gfx_v6_0_hw_fini(adev); + return gfx_v6_0_hw_fini(ip_block); } -static int gfx_v6_0_resume(void *handle) +static int gfx_v6_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gfx_v6_0_hw_init(adev); + return gfx_v6_0_hw_init(ip_block); } static bool gfx_v6_0_is_idle(void *handle) @@ -3177,24 +3173,19 @@ static bool gfx_v6_0_is_idle(void *handle) return true; } -static int gfx_v6_0_wait_for_idle(void *handle) +static int gfx_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (gfx_v6_0_is_idle(handle)) + if (gfx_v6_0_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int gfx_v6_0_soft_reset(void *handle) -{ - return 0; -} - static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { @@ -3444,7 +3435,6 @@ static void 
gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring) static const struct amd_ip_funcs gfx_v6_0_ip_funcs = { .name = "gfx_v6_0", .early_init = gfx_v6_0_early_init, - .late_init = NULL, .sw_init = gfx_v6_0_sw_init, .sw_fini = gfx_v6_0_sw_fini, .hw_init = gfx_v6_0_hw_init, @@ -3453,11 +3443,8 @@ static const struct amd_ip_funcs gfx_v6_0_ip_funcs = { .resume = gfx_v6_0_resume, .is_idle = gfx_v6_0_is_idle, .wait_for_idle = gfx_v6_0_wait_for_idle, - .soft_reset = gfx_v6_0_soft_reset, .set_clockgating_state = gfx_v6_0_set_clockgating_state, .set_powergating_state = gfx_v6_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index f146806c4633..824d5913103b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2559,7 +2559,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev) ring->wptr = 0; WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); @@ -2876,7 +2876,7 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev, mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ wb_gpu_addr = ring->rptr_gpu_addr; mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; mqd->cp_hqd_pq_rptr_report_addr_hi = @@ -4134,9 +4134,9 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { .update_spm_vmid = gfx_v7_0_update_spm_vmid }; -static int gfx_v7_0_early_init(void *handle) +static int gfx_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.xcc_mask = 1; adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS; @@ -4151,9 +4151,9 @@ static int gfx_v7_0_early_init(void *handle) return 0; } -static int gfx_v7_0_late_init(void *handle) +static int gfx_v7_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -4343,10 +4343,10 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, return 0; } -static int gfx_v7_0_sw_init(void *handle) +static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j, k, r, ring_id; switch (adev->asic_type) { @@ -4439,9 +4439,9 @@ static int gfx_v7_0_sw_init(void *handle) return r; } -static int gfx_v7_0_sw_fini(void *handle) +static int gfx_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->gfx.num_gfx_rings; i++) @@ -4465,10 +4465,10 @@ static int gfx_v7_0_sw_fini(void *handle) return 0; } -static int gfx_v7_0_hw_init(void *handle) +static int gfx_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gfx_v7_0_constants_init(adev); @@ -4486,9 +4486,9 @@ static int gfx_v7_0_hw_init(void *handle) return r; } -static int gfx_v7_0_hw_fini(void *handle) +static int gfx_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -4499,18 +4499,14 @@ static int gfx_v7_0_hw_fini(void *handle) return 0; } -static int gfx_v7_0_suspend(void *handle) +static int gfx_v7_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gfx_v7_0_hw_fini(adev); + return gfx_v7_0_hw_fini(ip_block); } -static int gfx_v7_0_resume(void *handle) +static int gfx_v7_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gfx_v7_0_hw_init(adev); + return gfx_v7_0_hw_init(ip_block); } static bool gfx_v7_0_is_idle(void *handle) @@ -4523,11 +4519,11 @@ static bool gfx_v7_0_is_idle(void *handle) return true; } -static int gfx_v7_0_wait_for_idle(void *handle) +static int gfx_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -4540,11 +4536,11 @@ static int gfx_v7_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int gfx_v7_0_soft_reset(void *handle) +static int gfx_v7_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* GRBM_STATUS */ tmp = RREG32(mmGRBM_STATUS); @@ -5009,8 +5005,6 @@ static const struct amd_ip_funcs gfx_v7_0_ip_funcs = { .soft_reset = gfx_v7_0_soft_reset, .set_clockgating_state = gfx_v7_0_set_clockgating_state, .set_powergating_state = gfx_v7_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index bc8295812cc8..b7006c41e270 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1894,12 +1894,12 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, static void gfx_v8_0_sq_irq_work_func(struct work_struct *work); -static int gfx_v8_0_sw_init(void *handle) +static int gfx_v8_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id; int xcc_id = 0; struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (adev->asic_type) { case CHIP_TONGA: @@ -2037,9 +2037,9 @@ static int gfx_v8_0_sw_init(void *handle) return 0; } -static int gfx_v8_0_sw_fini(void *handle) +static int gfx_v8_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->gfx.num_gfx_rings; i++) @@ -4260,7 +4260,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) ring->wptr = 0; WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); - /* set the wb address wether it's enabled 
or not */
+	/* set the wb address whether it's enabled or not */
 	rptr_addr = ring->rptr_gpu_addr;
 	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
 	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
@@ -4783,10 +4783,10 @@ static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
 	gfx_v8_0_cp_compute_enable(adev, enable);
 }
 
-static int gfx_v8_0_hw_init(void *handle)
+static int gfx_v8_0_hw_init(struct amdgpu_ip_block *ip_block)
 {
 	int r;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	gfx_v8_0_init_golden_registers(adev);
 	gfx_v8_0_constants_init(adev);
@@ -4823,6 +4823,13 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, 0);
 		amdgpu_ring_write(kiq_ring, 0);
 	}
+	/* Submit unmap queue packet */
+	amdgpu_ring_commit(kiq_ring);
+	/*
+	 * Ring test will do a basic scratch register change check. Just run
+	 * it to ensure that the unmap-queue packets submitted above have been
+	 * processed successfully before returning.
+	 */
 	r = amdgpu_ring_test_helper(kiq_ring);
 	if (r)
 		DRM_ERROR("KCQ disable failed\n");
@@ -4865,13 +4872,13 @@ static int gfx_v8_0_wait_for_rlc_idle(void *handle)
 	return -ETIMEDOUT;
 }
 
-static int gfx_v8_0_wait_for_idle(void *handle)
+static int gfx_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
 	unsigned int i;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if (gfx_v8_0_is_idle(handle))
+		if (gfx_v8_0_is_idle(adev))
 			return 0;
 
 		udelay(1);
@@ -4879,9 +4886,9 @@ static int gfx_v8_0_wait_for_idle(void *handle)
 	return -ETIMEDOUT;
 }
 
-static int gfx_v8_0_hw_fini(void *handle)
+static int gfx_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -4897,8 +4904,9 @@ static int gfx_v8_0_hw_fini(void *handle)
 		pr_debug("For SRIOV client, shouldn't do anything.\n");
 		return 0;
 	}
+
 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-	if (!gfx_v8_0_wait_for_idle(adev))
+	if (!gfx_v8_0_wait_for_idle(ip_block))
 		gfx_v8_0_cp_enable(adev, false);
 	else
 		pr_err("cp is busy, skip halt cp\n");
@@ -4911,19 +4919,19 @@ static int gfx_v8_0_hw_fini(void *handle)
 	return 0;
 }
 
-static int gfx_v8_0_suspend(void *handle)
+static int gfx_v8_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-	return gfx_v8_0_hw_fini(handle);
+	return gfx_v8_0_hw_fini(ip_block);
 }
 
-static int gfx_v8_0_resume(void *handle)
+static int gfx_v8_0_resume(struct amdgpu_ip_block *ip_block)
 {
-	return gfx_v8_0_hw_init(handle);
+	return gfx_v8_0_hw_init(ip_block);
 }
 
-static bool gfx_v8_0_check_soft_reset(void *handle)
+static bool gfx_v8_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 	u32 tmp;
 
@@ -4983,9 +4991,9 @@
 	}
 }
 
-static int gfx_v8_0_pre_soft_reset(void *handle)
+static int gfx_v8_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	u32 grbm_soft_reset = 0;
 
 	if ((!adev->gfx.grbm_soft_reset) &&
@@ -5024,9 +5032,9 @@
 	return 0;
 }
 
-static int
gfx_v8_0_soft_reset(void *handle) +static int gfx_v8_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; @@ -5086,9 +5094,9 @@ static int gfx_v8_0_soft_reset(void *handle) return 0; } -static int gfx_v8_0_post_soft_reset(void *handle) +static int gfx_v8_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 grbm_soft_reset = 0; if ((!adev->gfx.grbm_soft_reset) && @@ -5254,9 +5262,9 @@ static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = { .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q }; -static int gfx_v8_0_early_init(void *handle) +static int gfx_v8_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.xcc_mask = 1; adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS; @@ -5271,9 +5279,9 @@ static int gfx_v8_0_early_init(void *handle) return 0; } -static int gfx_v8_0_late_init(void *handle) +static int gfx_v8_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -6947,8 +6955,6 @@ static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .set_clockgating_state = gfx_v8_0_set_clockgating_state, .set_powergating_state = gfx_v8_0_set_powergating_state, .get_clockgating_state = gfx_v8_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 23f0573ae47b..0b6f09f2cc9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2198,12 +2198,12 @@ static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v9_0_sw_init(void *handle) +static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id; int xcc_id = 0; struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; unsigned int hw_prio; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { @@ -2223,6 +2223,18 @@ static int gfx_v9_0_sw_init(void *handle) } switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(9, 4, 2): + adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex; + adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex); + if (adev->gfx.mec_fw_version >= 88) { + adev->gfx.enable_cleaner_shader = true; + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; default: adev->gfx.enable_cleaner_shader = false; break; @@ -2362,6 +2374,12 @@ static int gfx_v9_0_sw_init(void *handle) } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0); if (r) { DRM_ERROR("Failed to init KIQ BOs!\n"); @@ -2390,7 +2408,7 @@ static int 
gfx_v9_0_sw_init(void *handle) gfx_v9_0_alloc_ip_dump(adev); - r = amdgpu_gfx_sysfs_isolation_shader_init(adev); + r = amdgpu_gfx_sysfs_init(adev); if (r) return r; @@ -2398,10 +2416,10 @@ static int gfx_v9_0_sw_init(void *handle) } -static int gfx_v9_0_sw_fini(void *handle) +static int gfx_v9_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) { for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) @@ -2418,6 +2436,8 @@ static int gfx_v9_0_sw_fini(void *handle) amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); amdgpu_gfx_kiq_fini(adev, 0); + amdgpu_gfx_cleaner_shader_sw_fini(adev); + gfx_v9_0_mec_fini(adev); amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, @@ -2429,7 +2449,7 @@ static int gfx_v9_0_sw_fini(void *handle) } gfx_v9_0_free_microcode(adev); - amdgpu_gfx_sysfs_isolation_shader_fini(adev); + amdgpu_gfx_sysfs_fini(adev); kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); @@ -3184,6 +3204,15 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_INVALIDATE_ICACHE, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_INVALIDATE_ICACHE, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_INVALIDATE_ICACHE, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE0_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE1_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1); @@ -3265,8 +3294,8 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) * confirmed that the APU gfx10/gfx11 needn't such update. 
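 * (Annotation, not part of the patch: pm_resume_via_firmware() is the PM
 * core helper that reports whether the platform resumed through firmware,
 * i.e. a genuine S3 resume path; the hunk below uses it in place of the
 * driver-private suspend_complete flag when deciding whether to skip the
 * CSB packet resubmit.)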
*/ if (adev->flags & AMD_IS_APU && - adev->in_s3 && !adev->suspend_complete) { - DRM_INFO(" Will skip the CSB packet resubmit\n"); + adev->in_s3 && !pm_resume_via_firmware()) { + DRM_INFO("Will skip the CSB packet resubmit\n"); return 0; } r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); @@ -3346,7 +3375,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); @@ -3393,7 +3422,15 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0); } else { WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, - (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); + (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK | + CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK | + CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_HALT_MASK | + CP_MEC_CNTL__MEC_ME2_HALT_MASK)); adev->gfx.kiq[0].ring.sched.ready = false; } udelay(50); @@ -3914,6 +3951,10 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) return r; } + if (adev->gfx.num_gfx_rings) + gfx_v9_0_cp_gfx_enable(adev, false); + gfx_v9_0_cp_compute_enable(adev, false); + r = gfx_v9_0_kiq_resume(adev); if (r) return r; @@ -3970,10 +4011,10 @@ static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable) gfx_v9_0_cp_compute_enable(adev, enable); } -static int gfx_v9_0_hw_init(void *handle) +static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, adev->gfx.cleaner_shader_ptr); @@ -3999,9 +4040,9 @@ static int gfx_v9_0_hw_init(void *handle) return r; } -static int gfx_v9_0_hw_fini(void *handle) +static int gfx_v9_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); @@ -4051,14 +4092,14 @@ static int gfx_v9_0_hw_fini(void *handle) return 0; } -static int gfx_v9_0_suspend(void *handle) +static int gfx_v9_0_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v9_0_hw_fini(handle); + return gfx_v9_0_hw_fini(ip_block); } -static int gfx_v9_0_resume(void *handle) +static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v9_0_hw_init(handle); + return gfx_v9_0_hw_init(ip_block); } static bool gfx_v9_0_is_idle(void *handle) @@ -4072,24 +4113,24 @@ static bool gfx_v9_0_is_idle(void *handle) return true; } -static int gfx_v9_0_wait_for_idle(void *handle) +static int gfx_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (gfx_v9_0_is_idle(handle)) + if 
(gfx_v9_0_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int gfx_v9_0_soft_reset(void *handle) +static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* GRBM_STATUS */ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS); @@ -4745,9 +4786,9 @@ fail: return r; } -static int gfx_v9_0_early_init(void *handle) +static int gfx_v9_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.funcs = &gfx_v9_0_gfx_funcs; @@ -4771,9 +4812,9 @@ static int gfx_v9_0_early_init(void *handle) return gfx_v9_0_init_microcode(adev); } -static int gfx_v9_0_ecc_late_init(void *handle) +static int gfx_v9_0_ecc_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; /* @@ -4805,9 +4846,9 @@ static int gfx_v9_0_ecc_late_init(void *handle) return 0; } -static int gfx_v9_0_late_init(void *handle) +static int gfx_v9_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -4822,7 +4863,7 @@ static int gfx_v9_0_late_init(void *handle) if (r) return r; - r = gfx_v9_0_ecc_late_init(handle); + r = gfx_v9_0_ecc_late_init(ip_block); if (r) return r; @@ -7167,8 +7208,6 @@ static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable) static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -7179,8 +7218,7 @@ static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) @@ -7237,10 +7275,6 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, unsigned long flags; int i, r; - if (!adev->debug_exp_resets && - !adev->gfx.num_gfx_rings) - return -EINVAL; - if (amdgpu_sriov_vf(adev)) return -EINVAL; @@ -7316,9 +7350,9 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, return amdgpu_ring_test_ring(ring); } -static void gfx_v9_ip_print(void *handle, struct drm_printer *p) +static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9); @@ -7356,9 +7390,9 @@ static void gfx_v9_ip_print(void *handle, struct drm_printer *p) } -static void gfx_v9_ip_dump(void *handle) +static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h index 36c0292b5110..0b6bd09b7529 100644 
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: MIT */
 /*
- * Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright 2024 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -24,3 +24,45 @@ static const u32 __maybe_unused gfx_9_0_cleaner_shader_hex[] = {
 	/* Add the cleaner shader code here */
 };
+
+/* Define the cleaner shader gfx_9_4_2 */
+static const u32 gfx_9_4_2_cleaner_shader_hex[] = {
+	0xbf068100, 0xbf84003b,
+	0xbf8a0000, 0xb07c0000,
+	0xbe8200ff, 0x00000078,
+	0xbf110802, 0x7e000280,
+	0x7e020280, 0x7e040280,
+	0x7e060280, 0x7e080280,
+	0x7e0a0280, 0x7e0c0280,
+	0x7e0e0280, 0x80828802,
+	0xbe803202, 0xbf84fff5,
+	0xbf9c0000, 0xbe8200ff,
+	0x80000000, 0x86020102,
+	0xbf840011, 0xbefe00c1,
+	0xbeff00c1, 0xd28c0001,
+	0x0001007f, 0xd28d0001,
+	0x0002027e, 0x10020288,
+	0xbe8200bf, 0xbefc00c1,
+	0xd89c2000, 0x00020201,
+	0xd89c6040, 0x00040401,
+	0x320202ff, 0x00000400,
+	0x80828102, 0xbf84fff8,
+	0xbefc00ff, 0x0000005c,
+	0xbf800000, 0xbe802c80,
+	0xbe812c80, 0xbe822c80,
+	0xbe832c80, 0x80fc847c,
+	0xbf84fffa, 0xbee60080,
+	0xbee70080, 0xbeea0180,
+	0xbeec0180, 0xbeee0180,
+	0xbef00180, 0xbef20180,
+	0xbef40180, 0xbef60180,
+	0xbef80180, 0xbefa0180,
+	0xbf810000, 0xbf8d0001,
+	0xbefc00ff, 0x0000005c,
+	0xbf800000, 0xbe802c80,
+	0xbe812c80, 0xbe822c80,
+	0xbe832c80, 0x80fc847c,
+	0xbf84fffa, 0xbee60080,
+	0xbee70080, 0xbeea01ff,
+	0x000000ee, 0xbf810000,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm
new file mode 100644
index 000000000000..35b8cf9070bd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader is used to clean LDS, SGPRs and VGPRs. It is the first 64 dwords (256 bytes) of the 192-dword cleaner shader.
+// To turn this shader program on for compilation, change this to main and lower shader main to main_1
+
+// MI200 : Clear SGPRs, VGPRs and LDS
+// Uses two kernels launched separately:
+// 1. Clean VGPRs, LDS, and lower SGPRs
+//        Launches one workgroup per CU, each workgroup with 4x wave64 per SIMD in the CU
+//        Waves are "wave64" and have 128 VGPRs each, which uses all 512 VGPRs per SIMD
+//        Waves in the workgroup share the 64KB of LDS
+//        Each wave clears SGPRs 0 - 95. Because there are 4 waves/SIMD, this is physical SGPRs 0-383
+//        Each wave clears 128 VGPRs, so all 512 in the SIMD
+//        The first wave of the workgroup clears its 64KB of LDS
+//        The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+//        before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
+// 2. Clean remaining SGPRs
+//        Launches a workgroup with 24 waves per workgroup, yielding 6 waves per SIMD in each CU
+//        Waves are allocating 96 SGPRs
+//        CP sets up SPI_RESOURCE_RESERVE_* registers to prevent these waves from allocating SGPRs 0-223.
+//        As such, these 6 waves per SIMD are allocated physical SGPRs 224-799
+//        Barriers do not work for >16 waves per workgroup, so we cannot start with S_BARRIER
+//        Instead, the shader starts with an S_SETHALT 1. Once all waves are launched CP will send unhalt command
+//        The shader then clears all SGPRs allocated to it, cleaning out physical SGPRs 224-799
+
+shader main
+  asic(MI200)
+  type(CS)
+  wave_size(64)
+// Note: original source code from SQ team
+
+// (theoretical fastest = ~512clks vgpr + 1536 lds + ~128 sgpr = 2176 clks)
+
+  s_cmp_eq_u32 s0, 1                 // Bit0 is set, sgpr0 is set then clear VGPRS and LDS as FW set COMPUTE_USER_DATA_3
+  s_cbranch_scc0  label_0023         // Clean VGPRs and LDS if sgpr0 of wave is set, scc = (s3 == 1)
+  S_BARRIER
+
+  s_movk_i32    m0, 0x0000
+  s_mov_b32     s2, 0x00000078       // Loop 128/8=16 times  (loop unrolled for performance)
+  //
+  // CLEAR VGPRs
+  //
+  s_set_gpr_idx_on  s2, 0x8          // enable Dest VGPR indexing
+label_0005:
+  v_mov_b32     v0, 0
+  v_mov_b32     v1, 0
+  v_mov_b32     v2, 0
+  v_mov_b32     v3, 0
+  v_mov_b32     v4, 0
+  v_mov_b32     v5, 0
+  v_mov_b32     v6, 0
+  v_mov_b32     v7, 0
+  s_sub_u32     s2, s2, 8
+  s_set_gpr_idx_idx  s2
+  s_cbranch_scc0  label_0005
+  s_set_gpr_idx_off
+
+  //
+  //
+
+  s_mov_b32     s2, 0x80000000         // Bit31 is first_wave
+  s_and_b32     s2, s2, s1             // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+  s_cbranch_scc0  label_clean_sgpr_1   // Clean LDS if it's the first wave of the ThreadGroup/WorkGroup
+  // CLEAR LDS
+  //
+  s_mov_b32 exec_lo, 0xffffffff
+  s_mov_b32 exec_hi, 0xffffffff
+  v_mbcnt_lo_u32_b32  v1, exec_hi, 0    // Set V1 to thread-ID (0..63)
+  v_mbcnt_hi_u32_b32  v1, exec_lo, v1   // Set V1 to thread-ID (0..63)
+  v_mul_u32_u24  v1, 0x00000008, v1     // * 8, so each thread is a double-dword address (8byte)
+  s_mov_b32     s2, 0x00000003f         // 64 loop iterations
+  s_mov_b32     m0, 0xffffffff
+  // Clear all of LDS space
+  // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+  ds_write2_b64  v1, v[2:3], v[2:3] offset1:32
+  ds_write2_b64  v1, v[4:5], v[4:5] offset0:64 offset1:96
+  v_add_co_u32  v1, vcc, 0x00000400, v1
+  s_sub_u32     s2, s2, 1
+  s_cbranch_scc0  label_001F
+  //
+  // CLEAR SGPRs
+  //
+label_clean_sgpr_1:
+  s_mov_b32     m0, 0x0000005c   // Loop 96/4=24 times  (loop unrolled for performance)
+  s_nop 0
+label_sgpr_loop:
+  s_movreld_b32     s0, 0
+  s_movreld_b32     s1, 0
+  s_movreld_b32     s2, 0
+  s_movreld_b32     s3, 0
+  s_sub_u32         m0, m0, 4
+  s_cbranch_scc0  label_sgpr_loop
+
+  //clear vcc, flat scratch
+  s_mov_b32 flat_scratch_lo, 0   //clear flat scratch lo SGPR
+  s_mov_b32 flat_scratch_hi, 0   //clear flat scratch hi SGPR
+  s_mov_b64 vcc, 0               //clear vcc
+  s_mov_b64 ttmp0, 0
//Clear ttmp0 and ttmp1 + s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3 + s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5 + s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7 + s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9 + s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11 + s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13 + s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15 +s_endpgm + +label_0023: + + s_sethalt 1 + + s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance) + s_nop 0 +label_sgpr_loop1: + + s_movreld_b32 s0, 0 + s_movreld_b32 s1, 0 + s_movreld_b32 s2, 0 + s_movreld_b32 s3, 0 + s_sub_u32 m0, m0, 4 + s_cbranch_scc0 label_sgpr_loop1 + + //clear vcc, flat scratch + s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR + s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR + s_mov_b64 vcc, 0xee //clear vcc + +s_endpgm +end + diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c100845409f7..e2b3dda57030 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1049,10 +1049,10 @@ static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v9_4_3_sw_init(void *handle) +static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id, xcc_id, num_xcc; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 3): @@ -1157,6 +1157,19 @@ static int gfx_v9_4_3_sw_init(void *handle) return r; } + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(9, 4, 3): + case IP_VERSION(9, 4, 4): + if (adev->gfx.mec_fw_version >= 155) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; + } + break; + default: + break; + } r = gfx_v9_4_3_gpu_early_init(adev); if (r) return r; @@ -1165,26 +1178,19 @@ static int gfx_v9_4_3_sw_init(void *handle) if (r) return r; - - if (!amdgpu_sriov_vf(adev)) { - r = amdgpu_gfx_sysfs_init(adev); - if (r) - return r; - } - - gfx_v9_4_3_alloc_ip_dump(adev); - - r = amdgpu_gfx_sysfs_isolation_shader_init(adev); + r = amdgpu_gfx_sysfs_init(adev); if (r) return r; + gfx_v9_4_3_alloc_ip_dump(adev); + return 0; } -static int gfx_v9_4_3_sw_fini(void *handle) +static int gfx_v9_4_3_sw_fini(struct amdgpu_ip_block *ip_block) { int i, num_xcc; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++) @@ -1201,9 +1207,7 @@ static int gfx_v9_4_3_sw_fini(void *handle) gfx_v9_4_3_mec_fini(adev); amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); gfx_v9_4_3_free_microcode(adev); - if (!amdgpu_sriov_vf(adev)) - amdgpu_gfx_sysfs_fini(adev); - amdgpu_gfx_sysfs_isolation_shader_fini(adev); + amdgpu_gfx_sysfs_fini(adev); kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); @@ -1247,8 +1251,10 @@ static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev, soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); mutex_unlock(&adev->srbm_mutex); - /* Initialize all compute VMIDs to have no GDS, GWS, or OA - acccess. These should be enabled by FW for target VMIDs. */ + /* + * Initialize all compute VMIDs to have no GDS, GWS, or OA + * access. 
These should be enabled by FW for target VMIDs. + */ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0); WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0); @@ -2343,10 +2349,10 @@ static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id) gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id); } -static int gfx_v9_4_3_hw_init(void *handle) +static int gfx_v9_4_3_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, adev->gfx.cleaner_shader_ptr); @@ -2367,9 +2373,9 @@ static int gfx_v9_4_3_hw_init(void *handle) return r; } -static int gfx_v9_4_3_hw_fini(void *handle) +static int gfx_v9_4_3_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, num_xcc; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); @@ -2384,14 +2390,14 @@ static int gfx_v9_4_3_hw_fini(void *handle) return 0; } -static int gfx_v9_4_3_suspend(void *handle) +static int gfx_v9_4_3_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v9_4_3_hw_fini(handle); + return gfx_v9_4_3_hw_fini(ip_block); } -static int gfx_v9_4_3_resume(void *handle) +static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v9_4_3_hw_init(handle); + return gfx_v9_4_3_hw_init(ip_block); } static bool gfx_v9_4_3_is_idle(void *handle) @@ -2408,24 +2414,24 @@ static bool gfx_v9_4_3_is_idle(void *handle) return true; } -static int gfx_v9_4_3_wait_for_idle(void *handle) +static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (gfx_v9_4_3_is_idle(handle)) + if (gfx_v9_4_3_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int gfx_v9_4_3_soft_reset(void *handle) +static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* GRBM_STATUS */ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS); @@ -2509,9 +2515,9 @@ static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } -static int gfx_v9_4_3_early_init(void *handle) +static int gfx_v9_4_3_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); @@ -2527,9 +2533,9 @@ static int gfx_v9_4_3_early_init(void *handle) return gfx_v9_4_3_init_microcode(adev); } -static int gfx_v9_4_3_late_init(void *handle) +static int gfx_v9_4_3_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -3056,9 +3062,6 @@ static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; uint32_t value = 0; - if (!adev->debug_exp_resets) - return; - value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); value = 
REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); @@ -3574,9 +3577,6 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, unsigned long flags; int r; - if (!adev->debug_exp_resets) - return -EINVAL; - if (amdgpu_sriov_vf(adev)) return -EINVAL; @@ -4567,8 +4567,6 @@ static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev) static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -4579,13 +4577,12 @@ static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_no amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } -static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p) +static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k; uint32_t xcc_id, xcc_offset, inst_offset; uint32_t num_xcc, reg, num_inst; @@ -4643,9 +4640,9 @@ static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p) } } -static void gfx_v9_4_3_ip_dump(void *handle) +static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k; uint32_t num_xcc, reg, num_inst; uint32_t xcc_id, xcc_offset, inst_offset; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 9784a2892185..697599c46240 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -175,7 +175,10 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, addr, entry->client_id, soc15_ih_clientid_name[entry->client_id]); - if (!amdgpu_sriov_vf(adev)) + /* Only print L2 fault status if the status register could be read and + * contains useful information + */ + if (status != 0) hub->vmhub_funcs->print_l2_protection_fault_status(adev, status); @@ -630,9 +633,9 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev) } -static int gmc_v10_0_early_init(void *handle) +static int gmc_v10_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v10_0_set_mmhub_funcs(adev); gmc_v10_0_set_gfxhub_funcs(adev); @@ -651,9 +654,9 @@ static int gmc_v10_0_early_init(void *handle) return 0; } -static int gmc_v10_0_late_init(void *handle) +static int gmc_v10_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_gmc_allocate_vm_inv_eng(adev); @@ -769,10 +772,10 @@ static int gmc_v10_0_gart_init(struct amdgpu_device *adev) return amdgpu_gart_table_vram_alloc(adev); } -static int gmc_v10_0_sw_init(void *handle) +static int gmc_v10_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfxhub.funcs->init(adev); @@ -920,9 +923,9 @@ static void 
gmc_v10_0_gart_fini(struct amdgpu_device *adev) amdgpu_gart_table_vram_free(adev); } -static int gmc_v10_0_sw_fini(void *handle) +static int gmc_v10_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_vm_manager_fini(adev); gmc_v10_0_gart_fini(adev); @@ -985,9 +988,9 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) return 0; } -static int gmc_v10_0_hw_init(void *handle) +static int gmc_v10_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode; @@ -1032,9 +1035,9 @@ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev) adev->mmhub.funcs->gart_disable(adev); } -static int gmc_v10_0_hw_fini(void *handle) +static int gmc_v10_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v10_0_gart_disable(adev); @@ -1053,25 +1056,22 @@ static int gmc_v10_0_hw_fini(void *handle) return 0; } -static int gmc_v10_0_suspend(void *handle) +static int gmc_v10_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v10_0_hw_fini(adev); + gmc_v10_0_hw_fini(ip_block); return 0; } -static int gmc_v10_0_resume(void *handle) +static int gmc_v10_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v10_0_hw_init(adev); + r = gmc_v10_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -1082,17 +1082,12 @@ static bool gmc_v10_0_is_idle(void *handle) return true; } -static int gmc_v10_0_wait_for_idle(void *handle) +static int gmc_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* There is no need to wait for MC idle in GMC v10.*/ return 0; } -static int gmc_v10_0_soft_reset(void *handle) -{ - return 0; -} - static int gmc_v10_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1154,7 +1149,6 @@ const struct amd_ip_funcs gmc_v10_0_ip_funcs = { .resume = gmc_v10_0_resume, .is_idle = gmc_v10_0_is_idle, .wait_for_idle = gmc_v10_0_wait_for_idle, - .soft_reset = gmc_v10_0_soft_reset, .set_clockgating_state = gmc_v10_0_set_clockgating_state, .set_powergating_state = gmc_v10_0_set_powergating_state, .get_clockgating_state = gmc_v10_0_get_clockgating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 2797fd84432b..f893ab4c14df 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -144,7 +144,10 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", addr, entry->client_id); - if (!amdgpu_sriov_vf(adev)) + /* Only print L2 fault status if the status register could be read and + * contains useful information + */ + if (status != 0) hub->vmhub_funcs->print_l2_protection_fault_status(adev, status); } @@ -601,9 +604,9 @@ static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev) } } -static int gmc_v11_0_early_init(void *handle) +static int gmc_v11_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; 
gmc_v11_0_set_gfxhub_funcs(adev); gmc_v11_0_set_mmhub_funcs(adev); @@ -622,9 +625,9 @@ static int gmc_v11_0_early_init(void *handle) return 0; } -static int gmc_v11_0_late_init(void *handle) +static int gmc_v11_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_gmc_allocate_vm_inv_eng(adev); @@ -729,10 +732,10 @@ static int gmc_v11_0_gart_init(struct amdgpu_device *adev) return amdgpu_gart_table_vram_alloc(adev); } -static int gmc_v11_0_sw_init(void *handle) +static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->mmhub.funcs->init(adev); @@ -849,9 +852,9 @@ static void gmc_v11_0_gart_fini(struct amdgpu_device *adev) amdgpu_gart_table_vram_free(adev); } -static int gmc_v11_0_sw_fini(void *handle) +static int gmc_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_vm_manager_fini(adev); gmc_v11_0_gart_fini(adev); @@ -908,9 +911,9 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev) return 0; } -static int gmc_v11_0_hw_init(void *handle) +static int gmc_v11_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode; @@ -940,9 +943,9 @@ static void gmc_v11_0_gart_disable(struct amdgpu_device *adev) adev->mmhub.funcs->gart_disable(adev); } -static int gmc_v11_0_hw_fini(void *handle) +static int gmc_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { /* full access mode, so don't touch any GMC register */ @@ -961,25 +964,22 @@ static int gmc_v11_0_hw_fini(void *handle) return 0; } -static int gmc_v11_0_suspend(void *handle) +static int gmc_v11_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v11_0_hw_fini(adev); + gmc_v11_0_hw_fini(ip_block); return 0; } -static int gmc_v11_0_resume(void *handle) +static int gmc_v11_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v11_0_hw_init(adev); + r = gmc_v11_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -990,17 +990,12 @@ static bool gmc_v11_0_is_idle(void *handle) return true; } -static int gmc_v11_0_wait_for_idle(void *handle) +static int gmc_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* There is no need to wait for MC idle in GMC v11.*/ return 0; } -static int gmc_v11_0_soft_reset(void *handle) -{ - return 0; -} - static int gmc_v11_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1041,7 +1036,6 @@ const struct amd_ip_funcs gmc_v11_0_ip_funcs = { .resume = gmc_v11_0_resume, .is_idle = gmc_v11_0_is_idle, .wait_for_idle = gmc_v11_0_wait_for_idle, - .soft_reset = gmc_v11_0_soft_reset, .set_clockgating_state = gmc_v11_0_set_clockgating_state, .set_powergating_state = gmc_v11_0_set_powergating_state, .get_clockgating_state = gmc_v11_0_get_clockgating_state, diff --git 
a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index edcb5351f8cc..d22b027fd0bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -137,7 +137,10 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", addr, entry->client_id); - if (!amdgpu_sriov_vf(adev)) + /* Only print L2 fault status if the status register could be read and + * contains useful information + */ + if (status != 0) hub->vmhub_funcs->print_l2_protection_fault_status(adev, status); } @@ -604,9 +607,9 @@ static void gmc_v12_0_set_gfxhub_funcs(struct amdgpu_device *adev) } } -static int gmc_v12_0_early_init(void *handle) +static int gmc_v12_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v12_0_set_gfxhub_funcs(adev); gmc_v12_0_set_mmhub_funcs(adev); @@ -624,9 +627,9 @@ static int gmc_v12_0_early_init(void *handle) return 0; } -static int gmc_v12_0_late_init(void *handle) +static int gmc_v12_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_gmc_allocate_vm_inv_eng(adev); @@ -731,10 +734,10 @@ static int gmc_v12_0_gart_init(struct amdgpu_device *adev) return amdgpu_gart_table_vram_alloc(adev); } -static int gmc_v12_0_sw_init(void *handle) +static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->mmhub.funcs->init(adev); @@ -841,9 +844,9 @@ static void gmc_v12_0_gart_fini(struct amdgpu_device *adev) amdgpu_gart_table_vram_free(adev); } -static int gmc_v12_0_sw_fini(void *handle) +static int gmc_v12_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_vm_manager_fini(adev); gmc_v12_0_gart_fini(adev); @@ -894,10 +897,10 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev) return 0; } -static int gmc_v12_0_hw_init(void *handle) +static int gmc_v12_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* The sequence of these two function calls matters.*/ gmc_v12_0_init_golden_registers(adev); @@ -924,9 +927,9 @@ static void gmc_v12_0_gart_disable(struct amdgpu_device *adev) adev->mmhub.funcs->gart_disable(adev); } -static int gmc_v12_0_hw_fini(void *handle) +static int gmc_v12_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { /* full access mode, so don't touch any GMC register */ @@ -945,25 +948,22 @@ static int gmc_v12_0_hw_fini(void *handle) return 0; } -static int gmc_v12_0_suspend(void *handle) +static int gmc_v12_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v12_0_hw_fini(adev); + gmc_v12_0_hw_fini(ip_block); return 0; } -static int gmc_v12_0_resume(void *handle) +static int gmc_v12_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = 
gmc_v12_0_hw_init(adev); + r = gmc_v12_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -974,17 +974,12 @@ static bool gmc_v12_0_is_idle(void *handle) return true; } -static int gmc_v12_0_wait_for_idle(void *handle) +static int gmc_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* There is no need to wait for MC idle in GMC v11.*/ return 0; } -static int gmc_v12_0_soft_reset(void *handle) -{ - return 0; -} - static int gmc_v12_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1025,7 +1020,6 @@ const struct amd_ip_funcs gmc_v12_0_ip_funcs = { .resume = gmc_v12_0_resume, .is_idle = gmc_v12_0_is_idle, .wait_for_idle = gmc_v12_0_wait_for_idle, - .soft_reset = gmc_v12_0_soft_reset, .set_clockgating_state = gmc_v12_0_set_clockgating_state, .set_powergating_state = gmc_v12_0_set_powergating_state, .get_clockgating_state = gmc_v12_0_get_clockgating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index d36725666b54..ca000b3d1afc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -43,7 +43,7 @@ static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev); static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev); -static int gmc_v6_0_wait_for_idle(void *handle); +static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block); MODULE_FIRMWARE("amdgpu/tahiti_mc.bin"); MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin"); @@ -64,8 +64,13 @@ MODULE_FIRMWARE("amdgpu/si58_mc.bin"); static void gmc_v6_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; + struct amdgpu_ip_block *ip_block; - gmc_v6_0_wait_for_idle((void *)adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!ip_block) + return; + + gmc_v6_0_wait_for_idle(ip_block); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -213,6 +218,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, static void gmc_v6_0_mc_program(struct amdgpu_device *adev) { int i, j; + struct amdgpu_ip_block *ip_block; + /* Initialize HDP */ for (i = 0, j = 0; i < 32; i++, j += 0x6) { @@ -224,7 +231,11 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (gmc_v6_0_wait_for_idle((void *)adev)) + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!ip_block) + return; + + if (gmc_v6_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); if (adev->mode_info.num_crtc) { @@ -251,7 +262,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22); WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22); - if (gmc_v6_0_wait_for_idle((void *)adev)) + if (gmc_v6_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } @@ -762,9 +773,9 @@ static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type) } } -static int gmc_v6_0_early_init(void *handle) +static int gmc_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v6_0_set_gmc_funcs(adev); gmc_v6_0_set_irq_funcs(adev); @@ -772,9 +783,9 @@ static int gmc_v6_0_early_init(void *handle) return 0; } -static int gmc_v6_0_late_init(void *handle) +static int gmc_v6_0_late_init(struct amdgpu_ip_block *ip_block) { - 
struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); @@ -799,10 +810,10 @@ static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev) return size; } -static int gmc_v6_0_sw_init(void *handle) +static int gmc_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); @@ -876,9 +887,9 @@ static int gmc_v6_0_sw_init(void *handle) return 0; } -static int gmc_v6_0_sw_fini(void *handle) +static int gmc_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); @@ -889,10 +900,10 @@ static int gmc_v6_0_sw_fini(void *handle) return 0; } -static int gmc_v6_0_hw_init(void *handle) +static int gmc_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v6_0_mc_program(adev); @@ -914,9 +925,9 @@ static int gmc_v6_0_hw_init(void *handle) return 0; } -static int gmc_v6_0_hw_fini(void *handle) +static int gmc_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); gmc_v6_0_gart_disable(adev); @@ -924,21 +935,19 @@ static int gmc_v6_0_hw_fini(void *handle) return 0; } -static int gmc_v6_0_suspend(void *handle) +static int gmc_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v6_0_hw_fini(adev); + gmc_v6_0_hw_fini(ip_block); return 0; } -static int gmc_v6_0_resume(void *handle) +static int gmc_v6_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; - r = gmc_v6_0_hw_init(adev); + r = gmc_v6_0_hw_init(ip_block); if (r) return r; @@ -950,6 +959,7 @@ static int gmc_v6_0_resume(void *handle) static bool gmc_v6_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | @@ -959,13 +969,13 @@ static bool gmc_v6_0_is_idle(void *handle) return true; } -static int gmc_v6_0_wait_for_idle(void *handle) +static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (gmc_v6_0_is_idle(handle)) + if (gmc_v6_0_is_idle(adev)) return 0; udelay(1); } @@ -973,9 +983,10 @@ static int gmc_v6_0_wait_for_idle(void *handle) } -static int gmc_v6_0_soft_reset(void *handle) +static int gmc_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; + u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -992,7 +1003,8 @@ static int gmc_v6_0_soft_reset(void *handle) if (srbm_soft_reset) { gmc_v6_0_mc_stop(adev); - if (gmc_v6_0_wait_for_idle(adev)) + + if (gmc_v6_0_wait_for_idle(ip_block)) 
dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); tmp = RREG32(mmSRBM_SOFT_RESET); @@ -1109,8 +1121,6 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = { .soft_reset = gmc_v6_0_soft_reset, .set_clockgating_state = gmc_v6_0_set_clockgating_state, .set_powergating_state = gmc_v6_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 994432fb57ea..b6016f11956e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -52,7 +52,7 @@ static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev); static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); -static int gmc_v7_0_wait_for_idle(void *handle); +static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block); MODULE_FIRMWARE("amdgpu/bonaire_mc.bin"); MODULE_FIRMWARE("amdgpu/hawaii_mc.bin"); @@ -87,9 +87,14 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) static void gmc_v7_0_mc_stop(struct amdgpu_device *adev) { + struct amdgpu_ip_block *ip_block; u32 blackout; - gmc_v7_0_wait_for_idle((void *)adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!ip_block) + return; + + gmc_v7_0_wait_for_idle(ip_block); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -251,9 +256,14 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, */ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) { + struct amdgpu_ip_block *ip_block; u32 tmp; int i, j; + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!ip_block) + return; + /* Initialize HDP */ for (i = 0, j = 0; i < 32; i++, j += 0x6) { WREG32((0xb05 + j), 0x00000000); @@ -264,7 +274,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (gmc_v7_0_wait_for_idle((void *)adev)) + if (gmc_v7_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); if (adev->mode_info.num_crtc) { @@ -288,7 +298,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22); WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22); - if (gmc_v7_0_wait_for_idle((void *)adev)) + if (gmc_v7_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); @@ -921,9 +931,9 @@ static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type) } } -static int gmc_v7_0_early_init(void *handle) +static int gmc_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v7_0_set_gmc_funcs(adev); gmc_v7_0_set_irq_funcs(adev); @@ -940,9 +950,9 @@ static int gmc_v7_0_early_init(void *handle) return 0; } -static int gmc_v7_0_late_init(void *handle) +static int gmc_v7_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); @@ -968,10 +978,10 @@ static unsigned int gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev) return size; } -static int gmc_v7_0_sw_init(void 
*handle) +static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); @@ -1060,9 +1070,9 @@ static int gmc_v7_0_sw_init(void *handle) return 0; } -static int gmc_v7_0_sw_fini(void *handle) +static int gmc_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); @@ -1074,10 +1084,10 @@ static int gmc_v7_0_sw_fini(void *handle) return 0; } -static int gmc_v7_0_hw_init(void *handle) +static int gmc_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v7_0_init_golden_registers(adev); @@ -1101,9 +1111,9 @@ static int gmc_v7_0_hw_init(void *handle) return 0; } -static int gmc_v7_0_hw_fini(void *handle) +static int gmc_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); gmc_v7_0_gart_disable(adev); @@ -1111,25 +1121,22 @@ static int gmc_v7_0_hw_fini(void *handle) return 0; } -static int gmc_v7_0_suspend(void *handle) +static int gmc_v7_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v7_0_hw_fini(adev); + gmc_v7_0_hw_fini(ip_block); return 0; } -static int gmc_v7_0_resume(void *handle) +static int gmc_v7_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v7_0_hw_init(adev); + r = gmc_v7_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -1146,11 +1153,11 @@ static bool gmc_v7_0_is_idle(void *handle) return true; } -static int gmc_v7_0_wait_for_idle(void *handle) +static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned int i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -1167,9 +1174,9 @@ static int gmc_v7_0_wait_for_idle(void *handle) } -static int gmc_v7_0_soft_reset(void *handle) +static int gmc_v7_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1186,7 +1193,7 @@ static int gmc_v7_0_soft_reset(void *handle) if (srbm_soft_reset) { gmc_v7_0_mc_stop(adev); - if (gmc_v7_0_wait_for_idle((void *)adev)) + if (gmc_v7_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); tmp = RREG32(mmSRBM_SOFT_RESET); @@ -1351,8 +1358,6 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = { .soft_reset = gmc_v7_0_soft_reset, .set_clockgating_state = gmc_v7_0_set_clockgating_state, .set_powergating_state = gmc_v7_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 86488c052f82..12d5967ecd45 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -53,7 +53,7 @@ static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev); static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); -static int gmc_v8_0_wait_for_idle(void *handle); +static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block); MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); @@ -170,8 +170,13 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) static void gmc_v8_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; + struct amdgpu_ip_block *ip_block; - gmc_v8_0_wait_for_idle(adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!ip_block) + return; + + gmc_v8_0_wait_for_idle(ip_block); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -426,6 +431,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, */ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) { + struct amdgpu_ip_block *ip_block; u32 tmp; int i, j; @@ -439,7 +445,11 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (gmc_v8_0_wait_for_idle((void *)adev)) + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!ip_block) + return; + + if (gmc_v8_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); if (adev->mode_info.num_crtc) { @@ -474,7 +484,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22); WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22); - if (gmc_v8_0_wait_for_idle((void *)adev)) + if (gmc_v8_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); @@ -1027,9 +1037,9 @@ static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type) } } -static int gmc_v8_0_early_init(void *handle) +static int gmc_v8_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v8_0_set_gmc_funcs(adev); gmc_v8_0_set_irq_funcs(adev); @@ -1046,9 +1056,9 @@ static int gmc_v8_0_early_init(void *handle) return 0; } -static int gmc_v8_0_late_init(void *handle) +static int gmc_v8_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); @@ -1076,10 +1086,10 @@ static unsigned int gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev) #define mmMC_SEQ_MISC0_FIJI 0xA71 -static int gmc_v8_0_sw_init(void *handle) +static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); @@ -1173,9 +1183,9 @@ static int gmc_v8_0_sw_init(void *handle) return 0; } -static int gmc_v8_0_sw_fini(void *handle) +static int gmc_v8_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); @@ -1187,10 +1197,10 @@ static int gmc_v8_0_sw_fini(void *handle) return 0; } -static int 
gmc_v8_0_hw_init(void *handle) +static int gmc_v8_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v8_0_init_golden_registers(adev); @@ -1222,9 +1232,9 @@ static int gmc_v8_0_hw_init(void *handle) return 0; } -static int gmc_v8_0_hw_fini(void *handle) +static int gmc_v8_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); gmc_v8_0_gart_disable(adev); @@ -1232,25 +1242,22 @@ static int gmc_v8_0_hw_fini(void *handle) return 0; } -static int gmc_v8_0_suspend(void *handle) +static int gmc_v8_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v8_0_hw_fini(adev); + gmc_v8_0_hw_fini(ip_block); return 0; } -static int gmc_v8_0_resume(void *handle) +static int gmc_v8_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v8_0_hw_init(adev); + r = gmc_v8_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -1267,11 +1274,11 @@ static bool gmc_v8_0_is_idle(void *handle) return true; } -static int gmc_v8_0_wait_for_idle(void *handle) +static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned int i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -1289,10 +1296,10 @@ static int gmc_v8_0_wait_for_idle(void *handle) } -static bool gmc_v8_0_check_soft_reset(void *handle) +static bool gmc_v8_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__VMC_BUSY_MASK) @@ -1316,23 +1323,23 @@ static bool gmc_v8_0_check_soft_reset(void *handle) return false; } -static int gmc_v8_0_pre_soft_reset(void *handle) +static int gmc_v8_0_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->gmc.srbm_soft_reset) return 0; gmc_v8_0_mc_stop(adev); - if (gmc_v8_0_wait_for_idle(adev)) + if (gmc_v8_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); return 0; } -static int gmc_v8_0_soft_reset(void *handle) +static int gmc_v8_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset; if (!adev->gmc.srbm_soft_reset) @@ -1361,9 +1368,9 @@ static int gmc_v8_0_soft_reset(void *handle) return 0; } -static int gmc_v8_0_post_soft_reset(void *handle) +static int gmc_v8_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->gmc.srbm_soft_reset) return 0; @@ -1715,8 +1722,6 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = { .set_clockgating_state = gmc_v8_0_set_clockgating_state, .set_powergating_state = gmc_v8_0_set_powergating_state, .get_clockgating_state = gmc_v8_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, 
}; static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c76ac0dfe572..50c5da3020cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -672,6 +672,12 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) return 0; + /* Only print L2 fault status if the status register could be read and + * contains useful information + */ + if (!status) + return 0; + if (!amdgpu_sriov_vf(adev)) WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); @@ -1124,8 +1130,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, uint64_t *flags) { struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); - bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM; - bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT | AMDGPU_GEM_CREATE_EXT_COHERENT); + bool is_vram = bo->tbo.resource && + bo->tbo.resource->mem_type == TTM_PL_VRAM; + bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT | + AMDGPU_GEM_CREATE_EXT_COHERENT); bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT; bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED; struct amdgpu_vm *vm = mapping->bo_va->base.vm; @@ -1133,6 +1141,8 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, bool snoop = false; bool is_local; + dma_resv_assert_held(bo->tbo.base.resv); + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 1): case IP_VERSION(9, 4, 2): @@ -1251,9 +1261,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, *flags &= ~AMDGPU_PTE_VALID; } - if (bo && bo->tbo.resource) - gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo, - mapping, flags); + if ((*flags & AMDGPU_PTE_VALID) && bo) + gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags); } static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, @@ -1387,14 +1396,44 @@ gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes) } static enum amdgpu_memory_partition +gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev) +{ + switch (adev->gmc.num_mem_partitions) { + case 0: + return UNKNOWN_MEMORY_PARTITION_MODE; + case 1: + return AMDGPU_NPS1_PARTITION_MODE; + case 2: + return AMDGPU_NPS2_PARTITION_MODE; + case 4: + return AMDGPU_NPS4_PARTITION_MODE; + default: + return AMDGPU_NPS1_PARTITION_MODE; + } + + return AMDGPU_NPS1_PARTITION_MODE; +} + +static enum amdgpu_memory_partition gmc_v9_0_query_memory_partition(struct amdgpu_device *adev) { if (amdgpu_sriov_vf(adev)) - return AMDGPU_NPS1_PARTITION_MODE; + return gmc_v9_0_query_vf_memory_partition(adev); return gmc_v9_0_get_memory_partition(adev, NULL); } +static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev) +{ + if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested && + adev->nbio.funcs->is_nps_switch_requested(adev)) { + adev->gmc.reset_flags |= AMDGPU_GMC_INIT_RESET_NPS; + return true; + } + + return false; +} + static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid, @@ -1406,6 +1445,8 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags, .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size, .query_mem_partition_mode = &gmc_v9_0_query_memory_partition, + .request_mem_partition_mode = &amdgpu_gmc_request_memory_partition, 
+ .need_reset_on_init = &gmc_v9_0_need_reset_on_init, }; static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) @@ -1545,9 +1586,31 @@ static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev) adev->gmc.xgmi.ras = &xgmi_ras; } -static int gmc_v9_0_early_init(void *handle) +static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->gmc.supported_nps_modes = 0; + + if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) + return; + + /*TODO: Check PSP version also which supports NPS switch. Otherwise keep + * supported modes as 0. + */ + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(9, 4, 3): + case IP_VERSION(9, 4, 4): + adev->gmc.supported_nps_modes = + BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS4_PARTITION_MODE); + break; + default: + break; + } +} + +static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; /* * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined @@ -1601,9 +1664,9 @@ static int gmc_v9_0_early_init(void *handle) return 0; } -static int gmc_v9_0_late_init(void *handle) +static int gmc_v9_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_gmc_allocate_vm_inv_eng(adev); @@ -1900,6 +1963,8 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, switch (mode) { case UNKNOWN_MEMORY_PARTITION_MODE: + adev->gmc.num_mem_partitions = 0; + break; case AMDGPU_NPS1_PARTITION_MODE: adev->gmc.num_mem_partitions = 1; break; @@ -1919,7 +1984,7 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, /* Use NPS range info, if populated */ r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges, - adev->gmc.num_mem_partitions); + &adev->gmc.num_mem_partitions); if (!r) { l = 0; for (i = 1; i < adev->gmc.num_mem_partitions; ++i) { @@ -1929,6 +1994,11 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, } } else { + if (!adev->gmc.num_mem_partitions) { + dev_err(adev->dev, + "Not able to detect NPS mode, fall back to NPS1"); + adev->gmc.num_mem_partitions = 1; + } /* Fallback to sw based calculation */ size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT; size /= adev->gmc.num_mem_partitions; @@ -1987,10 +2057,10 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) adev->gmc.vram_width = 128 * 64; } -static int gmc_v9_0_sw_init(void *handle) +static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; unsigned long inst_mask = adev->aid_mask; adev->gfxhub.funcs->init(adev); @@ -2165,6 +2235,7 @@ static int gmc_v9_0_sw_init(void *handle) if (r) return r; + gmc_v9_0_init_nps_details(adev); /* * number of VMs * VMID 0 is reserved for System @@ -2198,9 +2269,9 @@ static int gmc_v9_0_sw_init(void *handle) return 0; } -static int gmc_v9_0_sw_fini(void *handle) +static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) @@ -2308,9 +2379,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) return 0; } -static int gmc_v9_0_hw_init(void 
*handle) +static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool value; int i, r; @@ -2393,9 +2464,9 @@ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) adev->mmhub.funcs->gart_disable(adev); } -static int gmc_v9_0_hw_fini(void *handle) +static int gmc_v9_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v9_0_gart_disable(adev); @@ -2413,32 +2484,44 @@ static int gmc_v9_0_hw_fini(void *handle) if (adev->mmhub.funcs->update_power_gating) adev->mmhub.funcs->update_power_gating(adev, false); - amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); + /* + * For minimal init, late_init is not called, hence VM fault/RAS irqs + * are not enabled. + */ + if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { + amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); - if (adev->gmc.ecc_irq.funcs && - amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) - amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); + if (adev->gmc.ecc_irq.funcs && + amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) + amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); + } return 0; } -static int gmc_v9_0_suspend(void *handle) +static int gmc_v9_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gmc_v9_0_hw_fini(adev); + return gmc_v9_0_hw_fini(ip_block); } -static int gmc_v9_0_resume(void *handle) +static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block) { + struct amdgpu_device *adev = ip_block->adev; int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v9_0_hw_init(adev); + /* If a reset is done for NPS mode switch, read the memory range + * information again. 
+ */ + if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) { + gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); + adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS; + } + + r = gmc_v9_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -2449,13 +2532,13 @@ static bool gmc_v9_0_is_idle(void *handle) return true; } -static int gmc_v9_0_wait_for_idle(void *handle) +static int gmc_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* There is no need to wait for MC idle in GMC v9.*/ return 0; } -static int gmc_v9_0_soft_reset(void *handle) +static int gmc_v9_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* XXX for emulation.*/ return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c index e019249883fb..194026e9be33 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c @@ -40,10 +40,12 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - if (!ring || !ring->funcs->emit_wreg) + if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else + RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + } } static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev, @@ -54,11 +56,13 @@ static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev, amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 5)) return; - if (!ring || !ring->funcs->emit_wreg) + if (!ring || !ring->funcs->emit_wreg) { WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1); - else + RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE); + } else { amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); + } } static void hdp_v4_0_query_ras_error_count(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c index ed7facacf2fe..d3962d469088 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c @@ -31,10 +31,12 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - if (!ring || !ring->funcs->emit_wreg) + if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else + RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + } } static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev, @@ -42,6 +44,7 @@ static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev, { if (!ring || !ring->funcs->emit_wreg) { WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1); + RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE); } else { amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c index 29c3484ae1f1..f52552c5fa27 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c @@ -31,13 +31,15 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - if (!ring || !ring->funcs->emit_wreg) + if (!ring || 
!ring->funcs->emit_wreg) { WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else + RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + } } static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c index 33736d361dd0..6948fe9956ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c @@ -34,10 +34,12 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - if (!ring || !ring->funcs->emit_wreg) + if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else + RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + } } static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c index 1c99bb09e2a1..63820329f67e 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c @@ -31,10 +31,12 @@ static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - if (!ring || !ring->funcs->emit_wreg) + if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else + RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + } } static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 07984f7c3ae7..7f45e93c0397 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -273,9 +273,9 @@ static void iceland_ih_set_rptr(struct amdgpu_device *adev, WREG32(mmIH_RB_RPTR, ih->rptr); } -static int iceland_ih_early_init(void *handle) +static int iceland_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -287,10 +287,10 @@ static int iceland_ih_early_init(void *handle) return 0; } -static int iceland_ih_sw_init(void *handle) +static int iceland_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) @@ -301,9 +301,9 @@ static int iceland_ih_sw_init(void *handle) return r; } -static int iceland_ih_sw_fini(void *handle) +static int iceland_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); @@ -311,34 +311,28 @@ static int iceland_ih_sw_fini(void *handle) return 0; } -static int iceland_ih_hw_init(void *handle) +static int iceland_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device 
*)handle; + struct amdgpu_device *adev = ip_block->adev; return iceland_ih_irq_init(adev); } -static int iceland_ih_hw_fini(void *handle) +static int iceland_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - iceland_ih_irq_disable(adev); + iceland_ih_irq_disable(ip_block->adev); return 0; } -static int iceland_ih_suspend(void *handle) +static int iceland_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return iceland_ih_hw_fini(adev); + return iceland_ih_hw_fini(ip_block); } -static int iceland_ih_resume(void *handle) +static int iceland_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return iceland_ih_hw_init(adev); + return iceland_ih_hw_init(ip_block); } static bool iceland_ih_is_idle(void *handle) @@ -352,11 +346,11 @@ static bool iceland_ih_is_idle(void *handle) return true; } -static int iceland_ih_wait_for_idle(void *handle) +static int iceland_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -368,10 +362,10 @@ static int iceland_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int iceland_ih_soft_reset(void *handle) +static int iceland_ih_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__IH_BUSY_MASK) @@ -413,7 +407,6 @@ static int iceland_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs iceland_ih_ip_funcs = { .name = "iceland_ih", .early_init = iceland_ih_early_init, - .late_init = NULL, .sw_init = iceland_ih_sw_init, .sw_fini = iceland_ih_sw_fini, .hw_init = iceland_ih_hw_init, @@ -425,8 +418,6 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = { .soft_reset = iceland_ih_soft_reset, .set_clockgating_state = iceland_ih_set_clockgating_state, .set_powergating_state = iceland_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs iceland_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index 18a761d6ef33..38f953fd65d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -559,19 +559,19 @@ static void ih_v6_0_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &ih_v6_0_self_irq_funcs; } -static int ih_v6_0_early_init(void *handle) +static int ih_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; ih_v6_0_set_interrupt_funcs(adev); ih_v6_0_set_self_irq_funcs(adev); return 0; } -static int ih_v6_0_sw_init(void *handle) +static int ih_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr; r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0, @@ -614,19 +614,19 @@ static int ih_v6_0_sw_init(void *handle) return r; } -static int ih_v6_0_sw_fini(void *handle) +static int ih_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct 
amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int ih_v6_0_hw_init(void *handle) +static int ih_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = ih_v6_0_irq_init(adev); if (r) @@ -635,27 +635,21 @@ static int ih_v6_0_hw_init(void *handle) return 0; } -static int ih_v6_0_hw_fini(void *handle) +static int ih_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ih_v6_0_irq_disable(adev); + ih_v6_0_irq_disable(ip_block->adev); return 0; } -static int ih_v6_0_suspend(void *handle) +static int ih_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v6_0_hw_fini(adev); + return ih_v6_0_hw_fini(ip_block); } -static int ih_v6_0_resume(void *handle) +static int ih_v6_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v6_0_hw_init(adev); + return ih_v6_0_hw_init(ip_block); } static bool ih_v6_0_is_idle(void *handle) @@ -664,13 +658,13 @@ static bool ih_v6_0_is_idle(void *handle) return true; } -static int ih_v6_0_wait_for_idle(void *handle) +static int ih_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int ih_v6_0_soft_reset(void *handle) +static int ih_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; @@ -785,7 +779,6 @@ static void ih_v6_0_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs ih_v6_0_ip_funcs = { .name = "ih_v6_0", .early_init = ih_v6_0_early_init, - .late_init = NULL, .sw_init = ih_v6_0_sw_init, .sw_fini = ih_v6_0_sw_fini, .hw_init = ih_v6_0_hw_init, @@ -798,8 +791,6 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = { .set_clockgating_state = ih_v6_0_set_clockgating_state, .set_powergating_state = ih_v6_0_set_powergating_state, .get_clockgating_state = ih_v6_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs ih_v6_0_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c index 2e0469feca1e..61381e0c3795 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c @@ -532,9 +532,9 @@ static void ih_v6_1_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &ih_v6_1_self_irq_funcs; } -static int ih_v6_1_early_init(void *handle) +static int ih_v6_1_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -547,10 +547,10 @@ static int ih_v6_1_early_init(void *handle) return 0; } -static int ih_v6_1_sw_init(void *handle) +static int ih_v6_1_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr; r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0, @@ -593,19 +593,19 @@ static int ih_v6_1_sw_init(void *handle) return r; } -static int ih_v6_1_sw_fini(void *handle) +static int ih_v6_1_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } 
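The hunks above and below repeat one mechanical conversion across the IH and JPEG blocks: every amd_ip_funcs callback that used to take an opaque void *handle (always cast straight to struct amdgpu_device *) now receives the struct amdgpu_ip_block it is registered on and derives the device from ip_block->adev. A minimal sketch of the resulting shape, assuming a hypothetical foo_ih block (foo_ih and foo_ih_irq_disable are illustrative names, not code from this series):

#include "amdgpu.h"	/* struct amdgpu_device, struct amdgpu_ip_block */

static void foo_ih_irq_disable(struct amdgpu_device *adev);	/* stand-in for the block's disable helper */

/*
 * Before, each callback took an opaque handle and cast it on entry:
 *
 *	static int foo_ih_hw_fini(void *handle)
 *	{
 *		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 *		...
 *	}
 *
 * After, the callback receives its own IP-block instance:
 */
static int foo_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
	foo_ih_irq_disable(ip_block->adev);	/* device reached via the block */
	return 0;
}

static int foo_ih_suspend(struct amdgpu_ip_block *ip_block)
{
	/* Sibling callbacks can be chained without re-deriving adev. */
	return foo_ih_hw_fini(ip_block);
}

Handing the callback its own block rather than the bare device also means per-block state (version, status flags) is reachable directly, without searching adev->ip_blocks from inside the callback.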
-static int ih_v6_1_hw_init(void *handle) +static int ih_v6_1_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = ih_v6_1_irq_init(adev); if (r) @@ -614,27 +614,21 @@ static int ih_v6_1_hw_init(void *handle) return 0; } -static int ih_v6_1_hw_fini(void *handle) +static int ih_v6_1_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ih_v6_1_irq_disable(adev); + ih_v6_1_irq_disable(ip_block->adev); return 0; } -static int ih_v6_1_suspend(void *handle) +static int ih_v6_1_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v6_1_hw_fini(adev); + return ih_v6_1_hw_fini(ip_block); } -static int ih_v6_1_resume(void *handle) +static int ih_v6_1_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v6_1_hw_init(adev); + return ih_v6_1_hw_init(ip_block); } static bool ih_v6_1_is_idle(void *handle) @@ -643,13 +637,13 @@ static bool ih_v6_1_is_idle(void *handle) return true; } -static int ih_v6_1_wait_for_idle(void *handle) +static int ih_v6_1_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int ih_v6_1_soft_reset(void *handle) +static int ih_v6_1_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; @@ -768,7 +762,6 @@ static void ih_v6_1_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs ih_v6_1_ip_funcs = { .name = "ih_v6_1", .early_init = ih_v6_1_early_init, - .late_init = NULL, .sw_init = ih_v6_1_sw_init, .sw_fini = ih_v6_1_sw_fini, .hw_init = ih_v6_1_hw_init, @@ -781,8 +774,6 @@ static const struct amd_ip_funcs ih_v6_1_ip_funcs = { .set_clockgating_state = ih_v6_1_set_clockgating_state, .set_powergating_state = ih_v6_1_set_powergating_state, .get_clockgating_state = ih_v6_1_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs ih_v6_1_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c index 6852081fcff2..d2428cf5d385 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c @@ -528,19 +528,19 @@ static void ih_v7_0_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &ih_v7_0_self_irq_funcs; } -static int ih_v7_0_early_init(void *handle) +static int ih_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; ih_v7_0_set_interrupt_funcs(adev); ih_v7_0_set_self_irq_funcs(adev); return 0; } -static int ih_v7_0_sw_init(void *handle) +static int ih_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr; r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0, @@ -583,19 +583,19 @@ static int ih_v7_0_sw_init(void *handle) return r; } -static int ih_v7_0_sw_fini(void *handle) +static int ih_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int ih_v7_0_hw_init(void *handle) +static int ih_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct 
amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = ih_v7_0_irq_init(adev); if (r) @@ -604,27 +604,21 @@ static int ih_v7_0_hw_init(void *handle) return 0; } -static int ih_v7_0_hw_fini(void *handle) +static int ih_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ih_v7_0_irq_disable(adev); + ih_v7_0_irq_disable(ip_block->adev); return 0; } -static int ih_v7_0_suspend(void *handle) +static int ih_v7_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v7_0_hw_fini(adev); + return ih_v7_0_hw_fini(ip_block); } -static int ih_v7_0_resume(void *handle) +static int ih_v7_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v7_0_hw_init(adev); + return ih_v7_0_hw_init(ip_block); } static bool ih_v7_0_is_idle(void *handle) @@ -633,13 +627,13 @@ static bool ih_v7_0_is_idle(void *handle) return true; } -static int ih_v7_0_wait_for_idle(void *handle) +static int ih_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int ih_v7_0_soft_reset(void *handle) +static int ih_v7_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; @@ -758,7 +752,6 @@ static void ih_v7_0_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs ih_v7_0_ip_funcs = { .name = "ih_v7_0", .early_init = ih_v7_0_early_init, - .late_init = NULL, .sw_init = ih_v7_0_sw_init, .sw_fini = ih_v7_0_sw_fini, .hw_init = ih_v7_0_hw_init, @@ -771,8 +764,6 @@ static const struct amd_ip_funcs ih_v7_0_ip_funcs = { .set_clockgating_state = ih_v7_0_set_clockgating_state, .set_powergating_state = ih_v7_0_set_powergating_state, .get_clockgating_state = ih_v7_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs ih_v7_0_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index 6e0e88076224..03b8b7cd5229 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -458,13 +458,13 @@ static int jpeg_v1_0_process_interrupt(struct amdgpu_device *adev, /** * jpeg_v1_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -int jpeg_v1_0_early_init(void *handle) +int jpeg_v1_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_inst = 1; adev->jpeg.num_jpeg_rings = 1; @@ -478,12 +478,12 @@ int jpeg_v1_0_early_init(void *handle) /** * jpeg_v1_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -int jpeg_v1_0_sw_init(void *handle) +int jpeg_v1_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -509,13 +509,13 @@ int jpeg_v1_0_sw_init(void *handle) /** * jpeg_v1_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * JPEG free up sw allocation */ -void jpeg_v1_0_sw_fini(void *handle) +void jpeg_v1_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_ring_fini(adev->jpeg.inst->ring_dec); } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h index 9654d22e0376..097328635083 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h @@ -24,9 +24,9 @@ #ifndef __JPEG_V1_0_H__ #define __JPEG_V1_0_H__ -int jpeg_v1_0_early_init(void *handle); -int jpeg_v1_0_sw_init(void *handle); -void jpeg_v1_0_sw_fini(void *handle); +int jpeg_v1_0_early_init(struct amdgpu_ip_block *ip_block); +int jpeg_v1_0_sw_init(struct amdgpu_ip_block *ip_block); +void jpeg_v1_0_sw_fini(struct amdgpu_ip_block *ip_block); void jpeg_v1_0_start(struct amdgpu_device *adev, int mode); #define JPEG_V1_REG_RANGE_START 0x8000 diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 41c0f8750dc1..6e29b69894a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -41,13 +41,13 @@ static int jpeg_v2_0_set_powergating_state(void *handle, /** * jpeg_v2_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v2_0_early_init(void *handle) +static int jpeg_v2_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_inst = 1; adev->jpeg.num_jpeg_rings = 1; @@ -61,13 +61,13 @@ static int jpeg_v2_0_early_init(void *handle) /** * jpeg_v2_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v2_0_sw_init(void *handle) +static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -104,14 +104,14 @@ static int jpeg_v2_0_sw_init(void *handle) /** * jpeg_v2_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * JPEG suspend and free up sw allocation */ -static int jpeg_v2_0_sw_fini(void *handle) +static int jpeg_v2_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_jpeg_suspend(adev); if (r) @@ -125,12 +125,12 @@ static int jpeg_v2_0_sw_fini(void *handle) /** * jpeg_v2_0_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* */ -static int jpeg_v2_0_hw_init(void *handle) +static int jpeg_v2_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, @@ -142,15 +142,15 @@ static int jpeg_v2_0_hw_init(void *handle) /** * jpeg_v2_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v2_0_hw_fini(void *handle) +static int jpeg_v2_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; - cancel_delayed_work_sync(&adev->vcn.idle_work); + cancel_delayed_work_sync(&adev->jpeg.idle_work); if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS)) @@ -162,20 +162,19 @@ static int jpeg_v2_0_hw_fini(void *handle) /** * jpeg_v2_0_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v2_0_suspend(void *handle) +static int jpeg_v2_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v2_0_hw_fini(adev); + r = jpeg_v2_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -183,20 +182,19 @@ static int jpeg_v2_0_suspend(void *handle) /** * jpeg_v2_0_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init JPEG block */ -static int jpeg_v2_0_resume(void *handle) +static int jpeg_v2_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v2_0_hw_init(adev); + r = jpeg_v2_0_hw_init(ip_block); return r; } @@ -666,9 +664,9 @@ static bool jpeg_v2_0_is_idle(void *handle) UVD_JRBC_STATUS__RB_JOB_DONE_MASK); } -static int jpeg_v2_0_wait_for_idle(void *handle) +static int jpeg_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, @@ -744,7 +742,6 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = { .name = "jpeg_v2_0", .early_init = jpeg_v2_0_early_init, - .late_init = NULL, .sw_init = jpeg_v2_0_sw_init, .sw_fini = jpeg_v2_0_sw_fini, .hw_init = jpeg_v2_0_hw_init, @@ -753,14 +750,8 @@ static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = { .resume = jpeg_v2_0_resume, .is_idle = jpeg_v2_0_is_idle, .wait_for_idle = jpeg_v2_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v2_0_set_clockgating_state, .set_powergating_state = jpeg_v2_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index eedb9a829d95..9ac421486f05 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -50,13 +50,13 @@ static int amdgpu_ih_clientid_jpeg[] = { /** * jpeg_v2_5_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v2_5_early_init(void *handle) +static int jpeg_v2_5_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 harvest; int i; @@ -81,15 +81,15 @@ static int jpeg_v2_5_early_init(void *handle) /** * jpeg_v2_5_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v2_5_sw_init(void *handle) +static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { if (adev->jpeg.harvest_config & (1 << i)) @@ -153,14 +153,14 @@ static int jpeg_v2_5_sw_init(void *handle) /** * jpeg_v2_5_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * JPEG suspend and free up sw allocation */ -static int jpeg_v2_5_sw_fini(void *handle) +static int jpeg_v2_5_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_jpeg_suspend(adev); if (r) @@ -174,12 +174,12 @@ static int jpeg_v2_5_sw_fini(void *handle) /** * jpeg_v2_5_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v2_5_hw_init(void *handle) +static int jpeg_v2_5_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; @@ -202,16 +202,16 @@ static int jpeg_v2_5_hw_init(void *handle) /** * jpeg_v2_5_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v2_5_hw_fini(void *handle) +static int jpeg_v2_5_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; - cancel_delayed_work_sync(&adev->vcn.idle_work); + cancel_delayed_work_sync(&adev->jpeg.idle_work); for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { if (adev->jpeg.harvest_config & (1 << i)) @@ -231,20 +231,19 @@ static int jpeg_v2_5_hw_fini(void *handle) /** * jpeg_v2_5_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v2_5_suspend(void *handle) +static int jpeg_v2_5_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v2_5_hw_fini(adev); + r = jpeg_v2_5_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -252,20 +251,19 @@ static int jpeg_v2_5_suspend(void *handle) /** * jpeg_v2_5_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init JPEG block */ -static int jpeg_v2_5_resume(void *handle) +static int jpeg_v2_5_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v2_5_hw_init(adev); + r = jpeg_v2_5_hw_init(ip_block); return r; } @@ -501,9 +499,9 @@ static bool jpeg_v2_5_is_idle(void *handle) return ret; } -static int jpeg_v2_5_wait_for_idle(void *handle) +static int jpeg_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { @@ -615,7 +613,6 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = { .name = "jpeg_v2_5", .early_init = jpeg_v2_5_early_init, - .late_init = NULL, .sw_init = jpeg_v2_5_sw_init, .sw_fini = jpeg_v2_5_sw_fini, .hw_init = jpeg_v2_5_hw_init, @@ -624,20 +621,13 @@ static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = { .resume = jpeg_v2_5_resume, .is_idle = jpeg_v2_5_is_idle, .wait_for_idle = jpeg_v2_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v2_5_set_clockgating_state, .set_powergating_state = jpeg_v2_5_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = { .name = "jpeg_v2_6", .early_init = jpeg_v2_5_early_init, - .late_init = NULL, .sw_init = jpeg_v2_5_sw_init, .sw_fini = jpeg_v2_5_sw_fini, .hw_init = jpeg_v2_5_hw_init, @@ -646,14 +636,8 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = { .resume = jpeg_v2_5_resume, .is_idle = jpeg_v2_5_is_idle, .wait_for_idle = jpeg_v2_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v2_5_set_clockgating_state, .set_powergating_state = jpeg_v2_5_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index b1e7fd25afbc..e0df6800502c 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -42,13 +42,13 @@ static int jpeg_v3_0_set_powergating_state(void *handle, /** * jpeg_v3_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v3_0_early_init(void *handle) +static int jpeg_v3_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 harvest; @@ -75,13 +75,13 @@ static int jpeg_v3_0_early_init(void *handle) /** * jpeg_v3_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Load firmware and sw initialization */ -static int jpeg_v3_0_sw_init(void *handle) +static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -118,13 +118,13 @@ static int jpeg_v3_0_sw_init(void *handle) /** * jpeg_v3_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * JPEG suspend and free up sw allocation */ -static int jpeg_v3_0_sw_fini(void *handle) +static int jpeg_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); @@ -139,12 +139,12 @@ static int jpeg_v3_0_sw_fini(void *handle) /** * jpeg_v3_0_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v3_0_hw_init(void *handle) +static int jpeg_v3_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, @@ -156,15 +156,15 @@ static int jpeg_v3_0_hw_init(void *handle) /** * jpeg_v3_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v3_0_hw_fini(void *handle) +static int jpeg_v3_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; - cancel_delayed_work_sync(&adev->vcn.idle_work); + cancel_delayed_work_sync(&adev->jpeg.idle_work); if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS)) @@ -176,20 +176,19 @@ static int jpeg_v3_0_hw_fini(void *handle) /** * jpeg_v3_0_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v3_0_suspend(void *handle) +static int jpeg_v3_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v3_0_hw_fini(adev); + r = jpeg_v3_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -197,20 +196,19 @@ static int jpeg_v3_0_suspend(void *handle) /** * jpeg_v3_0_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init JPEG block */ -static int jpeg_v3_0_resume(void *handle) +static int jpeg_v3_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v3_0_hw_init(adev); + r = jpeg_v3_0_hw_init(ip_block); return r; } @@ -459,9 +457,9 @@ static bool jpeg_v3_0_is_idle(void *handle) return ret; } -static int jpeg_v3_0_wait_for_idle(void *handle) +static int jpeg_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, @@ -535,7 +533,6 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = { .name = "jpeg_v3_0", .early_init = jpeg_v3_0_early_init, - .late_init = NULL, .sw_init = jpeg_v3_0_sw_init, .sw_fini = jpeg_v3_0_sw_fini, .hw_init = jpeg_v3_0_hw_init, @@ -544,14 +541,8 @@ static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = { .resume = jpeg_v3_0_resume, .is_idle = jpeg_v3_0_is_idle, .wait_for_idle = jpeg_v3_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v3_0_set_clockgating_state, .set_powergating_state = jpeg_v3_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 6c5c1a68a9b7..eca1963c33b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -48,13 +48,13 @@ static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring); /** * jpeg_v4_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v4_0_early_init(void *handle) +static int jpeg_v4_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_inst = 1; @@ -70,13 +70,13 @@ static int jpeg_v4_0_early_init(void *handle) /** * jpeg_v4_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v4_0_sw_init(void *handle) +static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -123,6 +123,12 @@ static int jpeg_v4_0_sw_init(void *handle) r = amdgpu_jpeg_ras_sw_init(adev); if (r) return r; + /* TODO: Add queue reset mask when FW fully supports it */ + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); + if (r) + return r; return 0; } @@ -130,19 +136,20 @@ static int jpeg_v4_0_sw_init(void *handle) /** * jpeg_v4_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * JPEG suspend and free up sw allocation */ -static int jpeg_v4_0_sw_fini(void *handle) +static int jpeg_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); if (r) return r; + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); return r; @@ -151,12 +158,12 @@ static int jpeg_v4_0_sw_fini(void *handle) /** * jpeg_v4_0_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v4_0_hw_init(void *handle) +static int jpeg_v4_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; int r; @@ -187,15 +194,15 @@ static int jpeg_v4_0_hw_init(void *handle) /** * jpeg_v4_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v4_0_hw_fini(void *handle) +static int jpeg_v4_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; - cancel_delayed_work_sync(&adev->vcn.idle_work); + cancel_delayed_work_sync(&adev->jpeg.idle_work); if (!amdgpu_sriov_vf(adev)) { if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) @@ -210,20 +217,19 @@ static int jpeg_v4_0_hw_fini(void *handle) /** * jpeg_v4_0_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v4_0_suspend(void *handle) +static int jpeg_v4_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v4_0_hw_fini(adev); + r = jpeg_v4_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -231,20 +237,19 @@ static int jpeg_v4_0_suspend(void *handle) /** * jpeg_v4_0_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init JPEG block */ -static int jpeg_v4_0_resume(void *handle) +static int jpeg_v4_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v4_0_hw_init(adev); + r = jpeg_v4_0_hw_init(ip_block); return r; } @@ -621,9 +626,9 @@ static bool jpeg_v4_0_is_idle(void *handle) return ret; } -static int jpeg_v4_0_wait_for_idle(void *handle) +static int jpeg_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, @@ -702,7 +707,6 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = { .name = "jpeg_v4_0", .early_init = jpeg_v4_0_early_init, - .late_init = NULL, .sw_init = jpeg_v4_0_sw_init, .sw_fini = jpeg_v4_0_sw_fini, .hw_init = jpeg_v4_0_hw_init, @@ -711,14 +715,8 @@ static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = { .resume = jpeg_v4_0_resume, .is_idle = jpeg_v4_0_is_idle, .wait_for_idle = jpeg_v4_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v4_0_set_clockgating_state, .set_powergating_state = jpeg_v4_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 86958cb2c2ab..67b51bcbacd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -68,13 +68,13 @@ static inline bool jpeg_v4_0_3_normalizn_reqd(struct amdgpu_device *adev) /** * jpeg_v4_0_3_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v4_0_3_early_init(void *handle) +static int jpeg_v4_0_3_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS; @@ -88,13 +88,13 @@ static int jpeg_v4_0_3_early_init(void *handle) /** * jpeg_v4_0_3_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v4_0_3_sw_init(void *handle) +static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, j, r, jpeg_inst; @@ -159,25 +159,33 @@ static int jpeg_v4_0_3_sw_init(void *handle) } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); + if (r) + return r; + return 0; } /** * jpeg_v4_0_3_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * JPEG suspend and free up sw allocation */ -static int jpeg_v4_0_3_sw_fini(void *handle) +static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); if (r) return r; + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); return r; @@ -299,12 +307,12 @@ static int jpeg_v4_0_3_start_sriov(struct amdgpu_device *adev) /** * jpeg_v4_0_3_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v4_0_3_hw_init(void *handle) +static int jpeg_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, j, r, jpeg_inst; @@ -358,13 +366,13 @@ static int jpeg_v4_0_3_hw_init(void *handle) /** * jpeg_v4_0_3_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v4_0_3_hw_fini(void *handle) +static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret = 0; cancel_delayed_work_sync(&adev->jpeg.idle_work); @@ -380,20 +388,19 @@ static int jpeg_v4_0_3_hw_fini(void *handle) /** * jpeg_v4_0_3_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v4_0_3_suspend(void *handle) +static int jpeg_v4_0_3_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v4_0_3_hw_fini(adev); + r = jpeg_v4_0_3_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -401,20 +408,19 @@ static int jpeg_v4_0_3_suspend(void *handle) /** * jpeg_v4_0_3_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init JPEG block */ -static int jpeg_v4_0_3_resume(void *handle) +static int jpeg_v4_0_3_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v4_0_3_hw_init(adev); + r = jpeg_v4_0_3_hw_init(ip_block); return r; } @@ -674,11 +680,12 @@ void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring) amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, 0, 0, PACKETJ_TYPE0)); amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */ - } - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x80004000); + amdgpu_ring_write(ring, + PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0, + 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x80004000); + } } /** @@ -694,11 +701,12 @@ void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring) amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, 0, 0, PACKETJ_TYPE0)); amdgpu_ring_write(ring, 0x62a04); - } - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x00004000); + amdgpu_ring_write(ring, + PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0, + 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x00004000); + } } /** @@ -743,14 +751,6 @@ void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x3fbc); - - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x1); - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); amdgpu_ring_write(ring, 0); @@ -929,9 +929,9 @@ static bool jpeg_v4_0_3_is_idle(void *handle) return ret; } -static int jpeg_v4_0_3_wait_for_idle(void *handle) +static int jpeg_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret = 0; int i, j; @@ -1058,7 +1058,6 @@ static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = { .name = "jpeg_v4_0_3", .early_init = jpeg_v4_0_3_early_init, - .late_init = NULL, .sw_init = jpeg_v4_0_3_sw_init, .sw_fini = jpeg_v4_0_3_sw_fini, .hw_init = jpeg_v4_0_3_hw_init, @@ -1067,14 +1066,8 @@ static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = { .resume = jpeg_v4_0_3_resume, .is_idle = jpeg_v4_0_3_is_idle, .wait_for_idle = jpeg_v4_0_3_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v4_0_3_set_clockgating_state, .set_powergating_state = jpeg_v4_0_3_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { @@ -1088,7 +1081,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + 8 + /* jpeg_v4_0_3_dec_ring_emit_vm_flush */ - 22 + 22 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */ + 18 + 18 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */ 8 + 16, .emit_ib_size = 22, /* 
jpeg_v4_0_3_dec_ring_emit_ib */ .emit_ib = jpeg_v4_0_3_dec_ring_emit_ib, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index 44eeed445ea9..1d9e3b101c3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -61,13 +61,13 @@ static int amdgpu_ih_clientid_jpeg[] = { /** * jpeg_v4_0_5_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v4_0_5_early_init(void *handle) +static int jpeg_v4_0_5_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { case IP_VERSION(4, 0, 5): @@ -94,13 +94,13 @@ static int jpeg_v4_0_5_early_init(void *handle) /** * jpeg_v4_0_5_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v4_0_5_sw_init(void *handle) +static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r, i; @@ -153,25 +153,33 @@ static int jpeg_v4_0_5_sw_init(void *handle) adev->jpeg.inst[i].external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_PITCH); } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); + if (r) + return r; + return 0; } /** * jpeg_v4_0_5_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * JPEG suspend and free up sw allocation */ -static int jpeg_v4_0_5_sw_fini(void *handle) +static int jpeg_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); if (r) return r; + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); return r; @@ -180,12 +188,12 @@ static int jpeg_v4_0_5_sw_fini(void *handle) /** * jpeg_v4_0_5_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v4_0_5_hw_init(void *handle) +static int jpeg_v4_0_5_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r = 0; @@ -210,16 +218,16 @@ static int jpeg_v4_0_5_hw_init(void *handle) /** * jpeg_v4_0_5_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v4_0_5_hw_fini(void *handle) +static int jpeg_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; - cancel_delayed_work_sync(&adev->vcn.idle_work); + cancel_delayed_work_sync(&adev->jpeg.idle_work); for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { if (adev->jpeg.harvest_config & (1 << i)) @@ -237,20 +245,19 @@ static int jpeg_v4_0_5_hw_fini(void *handle) /** * jpeg_v4_0_5_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v4_0_5_suspend(void *handle) +static int jpeg_v4_0_5_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v4_0_5_hw_fini(adev); + r = jpeg_v4_0_5_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -258,20 +265,19 @@ static int jpeg_v4_0_5_suspend(void *handle) /** * jpeg_v4_0_5_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init JPEG block */ -static int jpeg_v4_0_5_resume(void *handle) +static int jpeg_v4_0_5_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v4_0_5_hw_init(adev); + r = jpeg_v4_0_5_hw_init(ip_block); return r; } @@ -637,9 +643,9 @@ static bool jpeg_v4_0_5_is_idle(void *handle) return ret; } -static int jpeg_v4_0_5_wait_for_idle(void *handle) +static int jpeg_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { @@ -743,7 +749,6 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = { .name = "jpeg_v4_0_5", .early_init = jpeg_v4_0_5_early_init, - .late_init = NULL, .sw_init = jpeg_v4_0_5_sw_init, .sw_fini = jpeg_v4_0_5_sw_fini, .hw_init = jpeg_v4_0_5_hw_init, @@ -752,14 +757,8 @@ static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = { .resume = jpeg_v4_0_5_resume, .is_idle = jpeg_v4_0_5_is_idle, .wait_for_idle = jpeg_v4_0_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v4_0_5_set_clockgating_state, .set_powergating_state = jpeg_v4_0_5_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c index d662aa841f97..58fb1e5fa89c 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c @@ -42,13 +42,13 @@ static int jpeg_v5_0_0_set_powergating_state(void *handle, /** * jpeg_v5_0_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Set ring and irq function pointers */ -static int jpeg_v5_0_0_early_init(void *handle) +static int jpeg_v5_0_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_inst = 1; adev->jpeg.num_jpeg_rings = 1; @@ -62,13 +62,13 @@ static int jpeg_v5_0_0_early_init(void *handle) /** * jpeg_v5_0_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v5_0_0_sw_init(void *handle) +static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -100,25 +100,32 @@ static int jpeg_v5_0_0_sw_init(void *handle) adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET; adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH); + /* TODO: Add queue reset mask when FW fully supports it */ + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); + if (r) + return r; return 0; } /** * jpeg_v5_0_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * JPEG suspend and free up sw allocation */ -static int jpeg_v5_0_0_sw_fini(void *handle) +static int jpeg_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); if (r) return r; + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); return r; @@ -127,12 +134,12 @@ static int jpeg_v5_0_0_sw_fini(void *handle) /** * jpeg_v5_0_0_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v5_0_0_hw_init(void *handle) +static int jpeg_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; int r; @@ -153,15 +160,15 @@ static int jpeg_v5_0_0_hw_init(void *handle) /** * jpeg_v5_0_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v5_0_0_hw_fini(void *handle) +static int jpeg_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; - cancel_delayed_work_sync(&adev->vcn.idle_work); + cancel_delayed_work_sync(&adev->jpeg.idle_work); if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) @@ -173,20 +180,19 @@ static int jpeg_v5_0_0_hw_fini(void *handle) /** * jpeg_v5_0_0_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * HW fini and suspend JPEG block */ -static int jpeg_v5_0_0_suspend(void *handle) +static int jpeg_v5_0_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v5_0_0_hw_fini(adev); + r = jpeg_v5_0_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -194,20 +200,19 @@ static int jpeg_v5_0_0_suspend(void *handle) /** * jpeg_v5_0_0_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init JPEG block */ -static int jpeg_v5_0_0_resume(void *handle) +static int jpeg_v5_0_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v5_0_0_hw_init(adev); + r = jpeg_v5_0_0_hw_init(ip_block); return r; } @@ -546,9 +551,9 @@ static bool jpeg_v5_0_0_is_idle(void *handle) return ret; } -static int jpeg_v5_0_0_wait_for_idle(void *handle) +static int jpeg_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, @@ -622,7 +627,6 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = { .name = "jpeg_v5_0_0", .early_init = jpeg_v5_0_0_early_init, - .late_init = NULL, .sw_init = jpeg_v5_0_0_sw_init, .sw_fini = jpeg_v5_0_0_sw_fini, .hw_init = jpeg_v5_0_0_hw_init, @@ -631,14 +635,8 @@ static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = { .resume = jpeg_v5_0_0_resume, .is_idle = jpeg_v5_0_0_is_idle, .wait_for_idle = jpeg_v5_0_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v5_0_0_set_clockgating_state, .set_powergating_state = jpeg_v5_0_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 231a3d490ea8..9c905b9e9376 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -55,8 +55,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes1.bin"); MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes_2.bin"); MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes1.bin"); -static int mes_v11_0_hw_init(void *handle); -static int mes_v11_0_hw_fini(void *handle); +static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block); +static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block); static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev); static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev); @@ -366,7 +366,7 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ uint32_t queue_id, uint32_t vmid) { struct amdgpu_device *adev = mes->adev; - uint32_t value; + uint32_t value, reg; int i, r = 0; amdgpu_gfx_rlc_enter_safe_mode(adev, 0); @@ -424,6 +424,31 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ } soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + } else if (queue_type == AMDGPU_RING_TYPE_SDMA) { + dev_info(adev->dev, "reset sdma queue 
(%d:%d:%d)\n", + me_id, pipe_id, queue_id); + switch (me_id) { + case 1: + reg = SOC15_REG_OFFSET(GC, 0, regSDMA1_QUEUE_RESET_REQ); + break; + case 0: + default: + reg = SOC15_REG_OFFSET(GC, 0, regSDMA0_QUEUE_RESET_REQ); + break; + } + + value = 1 << queue_id; + WREG32(reg, value); + /* wait for queue reset done */ + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32(reg) & value)) + break; + udelay(1); + } + if (i >= adev->usec_timeout) { + dev_err(adev->dev, "failed to wait on sdma queue reset done\n"); + r = -ETIMEDOUT; + } } amdgpu_gfx_rlc_exit_safe_mode(adev, 0); @@ -619,6 +644,18 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes, sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl)); misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en; break; + case MES_MISC_OP_CHANGE_CONFIG: + if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) { + dev_err(mes->adev->dev, "MES FW versoin must be larger than 0x63 to support limit single process feature.\n"); + return -EINVAL; + } + misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG; + misc_pkt.change_config.opcode = + MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS; + misc_pkt.change_config.option.bits.limit_single_process = + input->change_config.option.limit_single_process; + break; + default: DRM_ERROR("unsupported misc op (%d) \n", input->op); return -EINVAL; @@ -683,6 +720,9 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes) mes->event_log_gpu_addr; } + if (enforce_isolation) + mes_set_hw_res_pkt.limit_single_process = 1; + return mes_v11_0_submit_pkt_and_poll_completion(mes, &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt), offsetof(union MESAPI_SET_HW_RESOURCES, api_status)); @@ -883,6 +923,16 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable) uint32_t pipe, data = 0; if (enable) { + if (amdgpu_mes_log_enable) { + WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO, + lower_32_bits(adev->mes.event_log_gpu_addr + AMDGPU_MES_LOG_BUFFER_SIZE)); + WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI, + upper_32_bits(adev->mes.event_log_gpu_addr + AMDGPU_MES_LOG_BUFFER_SIZE)); + dev_info(adev->dev, "Setup CP MES MSCRATCH address : 0x%x. 
0x%x\n", + RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI), + RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO)); + } + data = RREG32_SOC15(GC, 0, regCP_MES_CNTL); data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1); data = REG_SET_FIELD(data, CP_MES_CNTL, @@ -1336,16 +1386,16 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev, return 0; } -static int mes_v11_0_sw_init(void *handle) +static int mes_v11_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe, r; adev->mes.funcs = &mes_v11_0_funcs; adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init; adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini; - adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE; + adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE; r = amdgpu_mes_init(adev); if (r) @@ -1377,9 +1427,9 @@ static int mes_v11_0_sw_init(void *handle) return 0; } -static int mes_v11_0_sw_fini(void *handle) +static int mes_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe; for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { @@ -1473,6 +1523,7 @@ static void mes_v11_0_kiq_clear(struct amdgpu_device *adev) static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) { int r = 0; + struct amdgpu_ip_block *ip_block; if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { @@ -1496,6 +1547,12 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES); + if (unlikely(!ip_block)) { + dev_err(adev->dev, "Failed to get MES handle\n"); + return -EINVAL; + } + r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE); if (r) goto failure; @@ -1506,7 +1563,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) adev->mes.enable_legacy_queue_map = false; if (adev->mes.enable_legacy_queue_map) { - r = mes_v11_0_hw_init(adev); + r = mes_v11_0_hw_init(ip_block); if (r) goto failure; } @@ -1514,7 +1571,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) return r; failure: - mes_v11_0_hw_fini(adev); + mes_v11_0_hw_fini(ip_block); return r; } @@ -1535,10 +1592,10 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev) return 0; } -static int mes_v11_0_hw_init(void *handle) +static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->mes.ring[0].sched.ready) goto out; @@ -1590,13 +1647,13 @@ out: return 0; failure: - mes_v11_0_hw_fini(adev); + mes_v11_0_hw_fini(ip_block); return r; } -static int mes_v11_0_hw_fini(void *handle) +static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_is_mes_info_enable(adev)) { amdgpu_bo_free_kernel(&adev->mes.resource_1, &adev->mes.resource_1_gpu_addr, &adev->mes.resource_1_addr); @@ -1604,33 +1661,31 @@ static int mes_v11_0_hw_fini(void *handle) return 0; } -static int mes_v11_0_suspend(void *handle) +static int mes_v11_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_mes_suspend(adev); + r = amdgpu_mes_suspend(ip_block->adev); if (r) return r; - return mes_v11_0_hw_fini(adev); + 
return mes_v11_0_hw_fini(ip_block); } -static int mes_v11_0_resume(void *handle) +static int mes_v11_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = mes_v11_0_hw_init(adev); + r = mes_v11_0_hw_init(ip_block); if (r) return r; - return amdgpu_mes_resume(adev); + return amdgpu_mes_resume(ip_block->adev); } -static int mes_v11_0_early_init(void *handle) +static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe, r; for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { @@ -1644,9 +1699,9 @@ static int mes_v11_0_early_init(void *handle) return 0; } -static int mes_v11_0_late_init(void *handle) +static int mes_v11_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* it's only intended for use in mes_self_test case, not for s0ix and reset */ if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend && @@ -1666,8 +1721,6 @@ static const struct amd_ip_funcs mes_v11_0_ip_funcs = { .hw_fini = mes_v11_0_hw_fini, .suspend = mes_v11_0_suspend, .resume = mes_v11_0_resume, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version mes_v11_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index 8d27421689c9..9ecc5d61e49b 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -39,8 +39,8 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes1.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_uni_mes.bin"); -static int mes_v12_0_hw_init(void *handle); -static int mes_v12_0_hw_fini(void *handle); +static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block); +static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block); static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev); static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev); @@ -531,6 +531,14 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes, sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl)); misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en; break; + case MES_MISC_OP_CHANGE_CONFIG: + misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG; + misc_pkt.change_config.opcode = + MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS; + misc_pkt.change_config.option.bits.limit_single_process = + input->change_config.option.limit_single_process; + break; + default: DRM_ERROR("unsupported misc op (%d) \n", input->op); return -EINVAL; @@ -550,7 +558,7 @@ static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes, int pipe) mes_set_hw_res_1_pkt.header.type = MES_API_TYPE_SCHEDULER; mes_set_hw_res_1_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1; mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; - mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 100; + mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 0xa; return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, &mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt), @@ -621,9 +629,12 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe) if (amdgpu_mes_log_enable) { mes_set_hw_res_pkt.enable_mes_event_int_logging = 1; - mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr; + mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + pipe * 
AMDGPU_MES_LOG_BUFFER_SIZE; } + if (enforce_isolation) + mes_set_hw_res_pkt.limit_single_process = 1; + return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt), offsetof(union MESAPI_SET_HW_RESOURCES, api_status)); @@ -1326,9 +1337,9 @@ static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev, return 0; } -static int mes_v12_0_sw_init(void *handle) +static int mes_v12_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe, r; adev->mes.funcs = &mes_v12_0_funcs; @@ -1336,7 +1347,7 @@ static int mes_v12_0_sw_init(void *handle) adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini; adev->mes.enable_legacy_queue_map = true; - adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE; + adev->mes.event_log_size = adev->enable_uni_mes ? (AMDGPU_MAX_MES_PIPES * AMDGPU_MES_LOG_BUFFER_SIZE) : AMDGPU_MES_LOG_BUFFER_SIZE; r = amdgpu_mes_init(adev); if (r) @@ -1362,9 +1373,9 @@ static int mes_v12_0_sw_init(void *handle) return 0; } -static int mes_v12_0_sw_fini(void *handle) +static int mes_v12_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe; for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { @@ -1452,6 +1463,7 @@ static void mes_v12_0_kiq_setting(struct amdgpu_ring *ring) static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev) { int r = 0; + struct amdgpu_ip_block *ip_block; if (adev->enable_uni_mes) mes_v12_0_kiq_setting(&adev->mes.ring[AMDGPU_MES_KIQ_PIPE]); @@ -1479,6 +1491,12 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev) mes_v12_0_enable(adev, true); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES); + if (unlikely(!ip_block)) { + dev_err(adev->dev, "Failed to get MES handle\n"); + return -EINVAL; + } + r = mes_v12_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE); if (r) goto failure; @@ -1492,7 +1510,7 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev) } if (adev->mes.enable_legacy_queue_map) { - r = mes_v12_0_hw_init(adev); + r = mes_v12_0_hw_init(ip_block); if (r) goto failure; } @@ -1500,7 +1518,7 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev) return r; failure: - mes_v12_0_hw_fini(adev); + mes_v12_0_hw_fini(ip_block); return r; } @@ -1522,10 +1540,10 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev) return 0; } -static int mes_v12_0_hw_init(void *handle) +static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->mes.ring[0].sched.ready) goto out; @@ -1584,42 +1602,40 @@ out: return 0; failure: - mes_v12_0_hw_fini(adev); + mes_v12_0_hw_fini(ip_block); return r; } -static int mes_v12_0_hw_fini(void *handle) +static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block) { return 0; } -static int mes_v12_0_suspend(void *handle) +static int mes_v12_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_mes_suspend(adev); + r = amdgpu_mes_suspend(ip_block->adev); if (r) return r; - return mes_v12_0_hw_fini(adev); + return mes_v12_0_hw_fini(ip_block); } -static int mes_v12_0_resume(void *handle) +static int mes_v12_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = 
mes_v12_0_hw_init(adev); + r = mes_v12_0_hw_init(ip_block); if (r) return r; - return amdgpu_mes_resume(adev); + return amdgpu_mes_resume(ip_block->adev); } -static int mes_v12_0_early_init(void *handle) +static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe, r; for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { @@ -1631,9 +1647,9 @@ static int mes_v12_0_early_init(void *handle) return 0; } -static int mes_v12_0_late_init(void *handle) +static int mes_v12_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* it's only intended for use in mes_self_test case, not for s0ix and reset */ if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index e3ddd22aa172..e9a6f33ca710 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -229,6 +229,52 @@ static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev) 0); } +static void mmhub_v1_0_init_saw(struct amdgpu_device *adev) +{ + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + uint32_t tmp; + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + lower_32_bits(pt_base >> 12)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + upper_32_bits(pt_base >> 12)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.gart_start >> 12)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.gart_start >> 44)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); + + /* Program SAW CONTEXT0 CNTL */ + tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_CNTL); + tmp |= 1 << CONTEXT0_CNTL_ENABLE_OFFSET; + tmp &= ~(3 << CONTEXT0_CNTL_PAGE_TABLE_DEPTH_OFFSET); + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_CNTL, tmp); + + /* Disable all Contexts except Context0 */ + tmp = 0xfffe; + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXTS_DISABLE, tmp); + + /* Program SAW CNTL4 */ + tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CNTL4); + tmp |= 1 << VMC_TAP_PDE_REQUEST_SNOOP_OFFSET; + tmp |= 1 << VMC_TAP_PTE_REQUEST_SNOOP_OFFSET; + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CNTL4, tmp); +} + static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; @@ -283,6 +329,9 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) i * hub->ctx_addr_distance, upper_32_bits(adev->vm_manager.max_pfn - 1)); } + + if (amdgpu_ip_version(adev, ISP_HWIP, 0)) + mmhub_v1_0_init_saw(adev); } static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c 
b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index f47bd7ada4d7..4dcb72d1bdda 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -61,15 +61,18 @@ static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev) static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev, enum idh_event event) { + int r = 0; u32 reg; reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0); - if (reg != event) + if (reg == IDH_FAIL) + r = -EINVAL; + else if (reg != event) return -ENOENT; xgpu_nv_mailbox_send_ack(adev); - return 0; + return r; } static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev) @@ -178,6 +181,9 @@ send_request: if (data1 != 0) event = IDH_RAS_POISON_READY; break; + case IDH_REQ_RAS_ERROR_COUNT: + event = IDH_RAS_ERROR_COUNT_READY; + break; default: break; } @@ -456,6 +462,11 @@ static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev) return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF); } +static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev) +{ + return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT); +} + const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .req_full_gpu = xgpu_nv_request_full_gpu_access, .rel_full_gpu = xgpu_nv_release_full_gpu_access, @@ -466,4 +477,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .trans_msg = xgpu_nv_mailbox_trans_msg, .ras_poison_handler = xgpu_nv_ras_poison_handler, .rcvd_ras_intr = xgpu_nv_rcvd_ras_intr, + .req_ras_err_count = xgpu_nv_req_ras_err_count, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h index 1d099ffb3a5a..9d61d76e1bf9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -40,6 +40,7 @@ enum idh_request { IDH_LOG_VF_ERROR = 200, IDH_READY_TO_RESET = 201, IDH_RAS_POISON = 202, + IDH_REQ_RAS_ERROR_COUNT = 203, }; enum idh_event { @@ -54,6 +55,8 @@ enum idh_event { IDH_RAS_POISON_READY, IDH_PF_SOFT_FLR_NOTIFICATION, IDH_RAS_ERROR_DETECTED, + IDH_RAS_ERROR_COUNT_READY = 11, + IDH_TEXT_MESSAGE = 255, }; diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index b281462093f1..0820ed62e2e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -542,19 +542,19 @@ static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs; } -static int navi10_ih_early_init(void *handle) +static int navi10_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; navi10_ih_set_interrupt_funcs(adev); navi10_ih_set_self_irq_funcs(adev); return 0; } -static int navi10_ih_sw_init(void *handle) +static int navi10_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr; r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0, @@ -593,43 +593,37 @@ static int navi10_ih_sw_init(void *handle) return r; } -static int navi10_ih_sw_fini(void *handle) +static int navi10_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int navi10_ih_hw_init(void *handle) +static int navi10_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + 
struct amdgpu_device *adev = ip_block->adev; return navi10_ih_irq_init(adev); } -static int navi10_ih_hw_fini(void *handle) +static int navi10_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - navi10_ih_irq_disable(adev); + navi10_ih_irq_disable(ip_block->adev); return 0; } -static int navi10_ih_suspend(void *handle) +static int navi10_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return navi10_ih_hw_fini(adev); + return navi10_ih_hw_fini(ip_block); } -static int navi10_ih_resume(void *handle) +static int navi10_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return navi10_ih_hw_init(adev); + return navi10_ih_hw_init(ip_block); } static bool navi10_ih_is_idle(void *handle) @@ -638,13 +632,13 @@ static bool navi10_ih_is_idle(void *handle) return true; } -static int navi10_ih_wait_for_idle(void *handle) +static int navi10_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int navi10_ih_soft_reset(void *handle) +static int navi10_ih_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; @@ -700,7 +694,6 @@ static void navi10_ih_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs navi10_ih_ip_funcs = { .name = "navi10_ih", .early_init = navi10_ih_early_init, - .late_init = NULL, .sw_init = navi10_ih_sw_init, .sw_fini = navi10_ih_sw_fini, .hw_init = navi10_ih_hw_init, @@ -713,8 +706,6 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = { .set_clockgating_state = navi10_ih_set_clockgating_state, .set_powergating_state = navi10_ih_set_powergating_state, .get_clockgating_state = navi10_ih_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs navi10_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h index a5b60c9a2418..c88284ff92d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h +++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h @@ -68,6 +68,7 @@ #define SDMA_SUBOP_POLL_REG_WRITE_MEM 1 #define SDMA_SUBOP_POLL_DBIT_WRITE_MEM 2 #define SDMA_SUBOP_POLL_MEM_VERIFY 3 +#define SDMA_SUBOP_VM_INVALIDATION 4 #define HEADER_AGENT_DISPATCH 4 #define HEADER_BARRIER 5 #define SDMA_OP_AQL_COPY 0 @@ -4041,6 +4042,69 @@ /* +** Definitions for SDMA_PKT_VM_INVALIDATION packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_VM_INVALIDATION_HEADER_op_offset 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_op_mask 0x000000FF +#define SDMA_PKT_VM_INVALIDATION_HEADER_op_shift 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_OP(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_offset 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift 8 +#define SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift) + +/*define for gfx_eng_id field*/ +#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_offset 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_mask 0x0000001F +#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_shift 16 +#define SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(x) (((x) & 
SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_shift) + +/*define for mm_eng_id field*/ +#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_offset 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_mask 0x0000001F +#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_shift 24 +#define SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_shift) + +/*define for INVALIDATEREQ word*/ +/*define for invalidatereq field*/ +#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_offset 1 +#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask 0xFFFFFFFF +#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift 0 +#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(x) (((x) & SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask) << SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift) + +/*define for ADDRESSRANGELO word*/ +/*define for addressrangelo field*/ +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_offset 2 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask 0xFFFFFFFF +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift 0 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_ADDRESSRANGELO(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift) + +/*define for ADDRESSRANGEHI word*/ +/*define for invalidateack field*/ +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_offset 3 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask 0x0000FFFF +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift 0 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift) + +/*define for addressrangehi field*/ +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_offset 3 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask 0x0000001F +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift 16 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift) + +/*define for reserved field*/ +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_offset 3 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask 0x000001FF +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift 23 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_RESERVED(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift) + + +/* ** Definitions for SDMA_PKT_ATOMIC packet */ diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c index 7a9adfda5814..814ab59fdd4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c @@ -275,6 +275,15 @@ static void nbio_v7_11_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data); + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(7, 11, 0): + case IP_VERSION(7, 11, 1): + case IP_VERSION(7, 11, 2): + case IP_VERSION(7, 11, 3): + data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23); + 
WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data); + break; + } } static void nbio_v7_11_update_medium_grain_clock_gating(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 8d80df94bd8b..a26a9be58eac 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -414,8 +414,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device /* ras_controller_int is dedicated for nbif ras error, * not the global interrupt for sync flood */ - amdgpu_ras_set_fed(adev, true); - amdgpu_ras_reset_gpu(adev); + amdgpu_ras_global_ras_isr(adev); } amdgpu_ras_error_data_fini(&err_data); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c index fb37e354a9d5..1ac730328516 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c @@ -247,6 +247,12 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data); + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(7, 7, 0): + data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23); + WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data); + break; + } } static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index d1bd79bbae53..8a0a63ac88d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -401,6 +401,17 @@ static int nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev) return px; } +static bool nbio_v7_9_is_nps_switch_requested(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS); + tmp = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_MEM_STATUS, + CHANGE_STATUE); + + /* 0x8 - NPS switch requested */ + return (tmp == 0x8); +} static u32 nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev, u32 *supp_modes) { @@ -508,6 +519,7 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .remap_hdp_registers = nbio_v7_9_remap_hdp_registers, .get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode, .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode, + .is_nps_switch_requested = nbio_v7_9_is_nps_switch_requested, .init_registers = nbio_v7_9_init_registers, .get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count, .set_reg_remap = nbio_v7_9_set_reg_remap, diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 4938e6b340e9..3bad565ded73 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -67,8 +67,8 @@ static const struct amd_ip_funcs nv_common_ip_funcs; /* Navi */ static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)}, }; static const struct amdgpu_video_codecs nv_video_codecs_encode = { @@ -94,8 +94,8 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode = { /* Sienna Cichlid */ static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = { - 
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, }; static const struct amdgpu_video_codecs sc_video_codecs_encode = { @@ -136,8 +136,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = { /* SRIOV Sienna Cichlid, not const since data is controlled by host */ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, }; static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = { @@ -634,9 +634,9 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = { .query_video_codecs = &nv_query_video_codecs, }; -static int nv_common_early_init(void *handle) +static int nv_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->nbio.funcs->set_reg_remap(adev); adev->smc_rreg = NULL; @@ -944,9 +944,9 @@ static int nv_common_early_init(void *handle) return 0; } -static int nv_common_late_init(void *handle) +static int nv_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { xgpu_nv_mailbox_get_irq(adev); @@ -973,9 +973,9 @@ static int nv_common_late_init(void *handle) return 0; } -static int nv_common_sw_init(void *handle) +static int nv_common_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_add_irq_id(adev); @@ -983,14 +983,9 @@ static int nv_common_sw_init(void *handle) return 0; } -static int nv_common_sw_fini(void *handle) +static int nv_common_hw_init(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static int nv_common_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->nbio.funcs->apply_lc_spc_mode_wa) adev->nbio.funcs->apply_lc_spc_mode_wa(adev); @@ -1014,9 +1009,9 @@ static int nv_common_hw_init(void *handle) return 0; } -static int nv_common_hw_fini(void *handle) +static int nv_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* Disable the doorbell aperture and selfring doorbell aperture * separately in hw_fini because nv_enable_doorbell_aperture @@ -1029,18 +1024,14 @@ static int nv_common_hw_fini(void *handle) return 0; } -static int nv_common_suspend(void *handle) +static int nv_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return nv_common_hw_fini(adev); + return nv_common_hw_fini(ip_block); } -static int nv_common_resume(void *handle) +static int nv_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return 
nv_common_hw_init(adev); + return nv_common_hw_init(ip_block); } static bool nv_common_is_idle(void *handle) @@ -1048,16 +1039,6 @@ static bool nv_common_is_idle(void *handle) return true; } -static int nv_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int nv_common_soft_reset(void *handle) -{ - return 0; -} - static int nv_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1115,17 +1096,12 @@ static const struct amd_ip_funcs nv_common_ip_funcs = { .early_init = nv_common_early_init, .late_init = nv_common_late_init, .sw_init = nv_common_sw_init, - .sw_fini = nv_common_sw_fini, .hw_init = nv_common_hw_init, .hw_fini = nv_common_hw_fini, .suspend = nv_common_suspend, .resume = nv_common_resume, .is_idle = nv_common_is_idle, - .wait_for_idle = nv_common_wait_for_idle, - .soft_reset = nv_common_soft_reset, .set_clockgating_state = nv_common_set_clockgating_state, .set_powergating_state = nv_common_set_powergating_state, .get_clockgating_state = nv_common_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 37b5ddd6f13b..f4a91b126c73 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -103,6 +103,10 @@ enum psp_gfx_cmd_id GFX_CMD_ID_AUTOLOAD_RLC = 0x00000021, /* Indicates all graphics fw loaded, start RLC autoload */ GFX_CMD_ID_BOOT_CFG = 0x00000022, /* Boot Config */ GFX_CMD_ID_SRIOV_SPATIAL_PART = 0x00000027, /* Configure spatial partitioning mode */ + /* IDs of performance monitoring/profiling */ + GFX_CMD_ID_CONFIG_SQ_PERFMON = 0x00000046, /* Config CGTT_SQ_CLK_CTRL */ + /* Dynamic memory partitioning (NPS mode change) */ + GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */ }; /* PSP boot config sub-commands */ @@ -351,6 +355,20 @@ struct psp_gfx_cmd_sriov_spatial_part { uint32_t override_this_aid; }; +/* Structure for SQ performance monitoring/profiling enable/disable */ +struct psp_gfx_cmd_config_sq_perfmon { + uint32_t gfx_xcp_mask; + uint8_t core_override; + uint8_t reg_override; + uint8_t perfmon_override; + uint8_t reserved[5]; +}; + +struct psp_gfx_cmd_fb_memory_part { + uint32_t mode; /* requested NPS mode */ + uint32_t resvd; +}; + /* All GFX ring buffer commands. 
*/ union psp_gfx_commands { @@ -365,6 +383,8 @@ union psp_gfx_commands struct psp_gfx_cmd_load_toc cmd_load_toc; struct psp_gfx_cmd_boot_cfg boot_cfg; struct psp_gfx_cmd_sriov_spatial_part cmd_spatial_part; + struct psp_gfx_cmd_config_sq_perfmon config_sq_perfmon; + struct psp_gfx_cmd_fb_memory_part cmd_memory_part; }; struct psp_gfx_uresp_reserved diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index 51e470e8d67d..c4b775aaee9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -823,6 +823,30 @@ static bool psp_v13_0_is_aux_sos_load_required(struct psp_context *psp) return (pmfw_ver < 0x557300); } +static bool psp_v13_0_is_reload_needed(struct psp_context *psp) +{ + uint32_t ucode_ver; + + if (!psp_v13_0_is_sos_alive(psp)) + return false; + + /* Restrict reload support only to specific IP versions */ + switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) { + case IP_VERSION(13, 0, 2): + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): + /* TOS version read from microcode header */ + ucode_ver = psp->sos.fw_version; + /* Read TOS version from hardware */ + psp_v13_0_init_sos_version(psp); + return (ucode_ver != psp->sos.fw_version); + default: + return false; + } + + return false; +} + static const struct psp_funcs psp_v13_0_funcs = { .init_microcode = psp_v13_0_init_microcode, .wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state, @@ -847,6 +871,7 @@ static const struct psp_funcs psp_v13_0_funcs = { .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk, .get_ras_capability = psp_v13_0_get_ras_capability, .is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required, + .is_reload_needed = psp_v13_0_is_reload_needed, }; void psp_v13_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 725392522267..7948d74f8722 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -807,9 +807,9 @@ static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -static int sdma_v2_4_early_init(void *handle) +static int sdma_v2_4_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->sdma.num_instances = SDMA_MAX_INSTANCE; @@ -826,11 +826,11 @@ static int sdma_v2_4_early_init(void *handle) return 0; } -static int sdma_v2_4_sw_init(void *handle) +static int sdma_v2_4_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* SDMA trap event */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP, @@ -866,9 +866,9 @@ static int sdma_v2_4_sw_init(void *handle) return r; } -static int sdma_v2_4_sw_fini(void *handle) +static int sdma_v2_4_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -878,10 +878,10 @@ static int sdma_v2_4_sw_fini(void *handle) return 0; } -static int sdma_v2_4_hw_init(void *handle) +static int sdma_v2_4_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; 
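	/* Editorial note: this hunk shows the conversion applied throughout the
	 * series -- amd_ip_funcs callbacks now take a typed struct amdgpu_ip_block *
	 * instead of an opaque void *handle. A minimal sketch of the recurring
	 * before/after shape (the foo_* names are illustrative only):
	 *
	 *   static int foo_hw_init(void *handle)                      // old
	 *   {
	 *           struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	 *           ...
	 *   }
	 *
	 *   static int foo_hw_init(struct amdgpu_ip_block *ip_block)  // new
	 *   {
	 *           struct amdgpu_device *adev = ip_block->adev;
	 *           ...
	 *   }
	 *
	 * The typed parameter drops the unchecked cast and lets a callback reach
	 * its own ip_block state directly, as in the line just above.
	 */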
sdma_v2_4_init_golden_registers(adev); @@ -892,27 +892,21 @@ static int sdma_v2_4_hw_init(void *handle) return r; } -static int sdma_v2_4_hw_fini(void *handle) +static int sdma_v2_4_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - sdma_v2_4_enable(adev, false); + sdma_v2_4_enable(ip_block->adev, false); return 0; } -static int sdma_v2_4_suspend(void *handle) +static int sdma_v2_4_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v2_4_hw_fini(adev); + return sdma_v2_4_hw_fini(ip_block); } -static int sdma_v2_4_resume(void *handle) +static int sdma_v2_4_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v2_4_hw_init(adev); + return sdma_v2_4_hw_init(ip_block); } static bool sdma_v2_4_is_idle(void *handle) @@ -927,11 +921,11 @@ static bool sdma_v2_4_is_idle(void *handle) return true; } -static int sdma_v2_4_wait_for_idle(void *handle) +static int sdma_v2_4_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -944,10 +938,10 @@ static int sdma_v2_4_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v2_4_soft_reset(void *handle) +static int sdma_v2_4_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp = RREG32(mmSRBM_STATUS2); if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { @@ -1102,7 +1096,6 @@ static int sdma_v2_4_set_powergating_state(void *handle, static const struct amd_ip_funcs sdma_v2_4_ip_funcs = { .name = "sdma_v2_4", .early_init = sdma_v2_4_early_init, - .late_init = NULL, .sw_init = sdma_v2_4_sw_init, .sw_fini = sdma_v2_4_sw_fini, .hw_init = sdma_v2_4_hw_init, @@ -1114,8 +1107,6 @@ static const struct amd_ip_funcs sdma_v2_4_ip_funcs = { .soft_reset = sdma_v2_4_soft_reset, .set_clockgating_state = sdma_v2_4_set_clockgating_state, .set_powergating_state = sdma_v2_4_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index e65194fe94af..9a3d729545a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1080,9 +1080,9 @@ static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -static int sdma_v3_0_early_init(void *handle) +static int sdma_v3_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; switch (adev->asic_type) { @@ -1106,11 +1106,11 @@ static int sdma_v3_0_early_init(void *handle) return 0; } -static int sdma_v3_0_sw_init(void *handle) +static int sdma_v3_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* SDMA trap event */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP, @@ -1152,9 +1152,9 @@ static int sdma_v3_0_sw_init(void *handle) return r; } 
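/* Editorial note: callers that used to hand adev to these callbacks must now
 * resolve the owning ip_block first, as the MES kiq_hw_init hunks earlier in
 * this patch do. A hedged sketch of that call path (SDMA chosen only as an
 * example block type; error handling trimmed):
 *
 *   struct amdgpu_ip_block *ip_block =
 *           amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SDMA);
 *
 *   if (unlikely(!ip_block))
 *           return -EINVAL;
 *
 *   return ip_block->version->funcs->suspend(ip_block);
 */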
-static int sdma_v3_0_sw_fini(void *handle) +static int sdma_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -1164,10 +1164,10 @@ static int sdma_v3_0_sw_fini(void *handle) return 0; } -static int sdma_v3_0_hw_init(void *handle) +static int sdma_v3_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v3_0_init_golden_registers(adev); @@ -1178,9 +1178,9 @@ static int sdma_v3_0_hw_init(void *handle) return r; } -static int sdma_v3_0_hw_fini(void *handle) +static int sdma_v3_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v3_0_ctx_switch_enable(adev, false); sdma_v3_0_enable(adev, false); @@ -1188,18 +1188,14 @@ static int sdma_v3_0_hw_fini(void *handle) return 0; } -static int sdma_v3_0_suspend(void *handle) +static int sdma_v3_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v3_0_hw_fini(adev); + return sdma_v3_0_hw_fini(ip_block); } -static int sdma_v3_0_resume(void *handle) +static int sdma_v3_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v3_0_hw_init(adev); + return sdma_v3_0_hw_init(ip_block); } static bool sdma_v3_0_is_idle(void *handle) @@ -1214,11 +1210,11 @@ static bool sdma_v3_0_is_idle(void *handle) return true; } -static int sdma_v3_0_wait_for_idle(void *handle) +static int sdma_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -1231,9 +1227,9 @@ static int sdma_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static bool sdma_v3_0_check_soft_reset(void *handle) +static bool sdma_v3_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS2); @@ -1252,9 +1248,9 @@ static bool sdma_v3_0_check_soft_reset(void *handle) } } -static int sdma_v3_0_pre_soft_reset(void *handle) +static int sdma_v3_0_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; if (!adev->sdma.srbm_soft_reset) @@ -1271,9 +1267,9 @@ static int sdma_v3_0_pre_soft_reset(void *handle) return 0; } -static int sdma_v3_0_post_soft_reset(void *handle) +static int sdma_v3_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; if (!adev->sdma.srbm_soft_reset) @@ -1290,9 +1286,9 @@ static int sdma_v3_0_post_soft_reset(void *handle) return 0; } -static int sdma_v3_0_soft_reset(void *handle) +static int sdma_v3_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp; @@ -1538,7 +1534,6 
@@ static void sdma_v3_0_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .name = "sdma_v3_0", .early_init = sdma_v3_0_early_init, - .late_init = NULL, .sw_init = sdma_v3_0_sw_init, .sw_fini = sdma_v3_0_sw_fini, .hw_init = sdma_v3_0_hw_init, @@ -1554,8 +1549,6 @@ static const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .set_clockgating_state = sdma_v3_0_set_clockgating_state, .set_powergating_state = sdma_v3_0_set_powergating_state, .get_clockgating_state = sdma_v3_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 23ef4eb36b40..c1f98f6cf20d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1751,9 +1751,9 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev) } } -static int sdma_v4_0_early_init(void *handle) +static int sdma_v4_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = sdma_v4_0_init_microcode(adev); @@ -1780,9 +1780,9 @@ static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, struct amdgpu_iv_entry *entry); -static int sdma_v4_0_late_init(void *handle) +static int sdma_v4_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v4_0_setup_ulv(adev); @@ -1792,11 +1792,11 @@ static int sdma_v4_0_late_init(void *handle) return 0; } -static int sdma_v4_0_sw_init(void *handle) +static int sdma_v4_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0); uint32_t *ptr; @@ -1929,9 +1929,9 @@ static int sdma_v4_0_sw_init(void *handle) return r; } -static int sdma_v4_0_sw_fini(void *handle) +static int sdma_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) { @@ -1951,9 +1951,9 @@ static int sdma_v4_0_sw_fini(void *handle) return 0; } -static int sdma_v4_0_hw_init(void *handle) +static int sdma_v4_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->flags & AMD_IS_APU) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); @@ -1964,9 +1964,9 @@ static int sdma_v4_0_hw_init(void *handle) return sdma_v4_0_start(adev); } -static int sdma_v4_0_hw_fini(void *handle) +static int sdma_v4_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; if (amdgpu_sriov_vf(adev)) @@ -1988,9 +1988,9 @@ static int sdma_v4_0_hw_fini(void *handle) return 0; } -static int sdma_v4_0_suspend(void *handle) +static int sdma_v4_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* SMU saves SDMA state for us */ if (adev->in_s0ix) { @@ -1998,12 +1998,12 @@ static int 
sdma_v4_0_suspend(void *handle) return 0; } - return sdma_v4_0_hw_fini(adev); + return sdma_v4_0_hw_fini(ip_block); } -static int sdma_v4_0_resume(void *handle) +static int sdma_v4_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* SMU restores SDMA state for us */ if (adev->in_s0ix) { @@ -2012,7 +2012,7 @@ static int sdma_v4_0_resume(void *handle) return 0; } - return sdma_v4_0_hw_init(adev); + return sdma_v4_0_hw_init(ip_block); } static bool sdma_v4_0_is_idle(void *handle) @@ -2030,11 +2030,11 @@ static bool sdma_v4_0_is_idle(void *handle) return true; } -static int sdma_v4_0_wait_for_idle(void *handle) +static int sdma_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i, j; u32 sdma[AMDGPU_MAX_SDMA_INSTANCES]; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { for (j = 0; j < adev->sdma.num_instances; j++) { @@ -2049,7 +2049,7 @@ static int sdma_v4_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v4_0_soft_reset(void *handle) +static int sdma_v4_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ @@ -2350,9 +2350,9 @@ static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags) *flags |= AMD_CG_SUPPORT_SDMA_LS; } -static void sdma_v4_0_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0); uint32_t instance_offset; @@ -2371,9 +2371,9 @@ static void sdma_v4_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v4_0_dump_ip_state(void *handle) +static void sdma_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index c77889040760..a38553f38fdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1290,9 +1290,9 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev) } } -static int sdma_v4_4_2_early_init(void *handle) +static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = sdma_v4_4_2_init_microcode(adev); @@ -1318,9 +1318,9 @@ static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); #endif -static int sdma_v4_4_2_late_init(void *handle) +static int sdma_v4_4_2_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; #if 0 struct ras_ih_if ih_info = { .cb = sdma_v4_4_2_process_ras_data_cb, @@ -1332,11 +1332,11 @@ static int sdma_v4_4_2_late_init(void *handle) return 0; } -static int sdma_v4_4_2_sw_init(void *handle) +static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = 
ip_block->adev; u32 aid_id; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2); uint32_t *ptr; @@ -1430,6 +1430,10 @@ static int sdma_v4_4_2_sw_init(void *handle) } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->sdma.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); + if (amdgpu_sdma_ras_sw_init(adev)) { dev_err(adev->dev, "fail to initialize sdma ras block\n"); return -EINVAL; @@ -1442,12 +1446,16 @@ static int sdma_v4_4_2_sw_init(void *handle) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + r = amdgpu_sdma_sysfs_reset_mask_init(adev); + if (r) + return r; + return r; } -static int sdma_v4_4_2_sw_fini(void *handle) +static int sdma_v4_4_2_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) { @@ -1456,6 +1464,7 @@ static int sdma_v4_4_2_sw_fini(void *handle) amdgpu_ring_fini(&adev->sdma.instance[i].page); } + amdgpu_sdma_sysfs_reset_mask_fini(adev); if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2) || amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5)) amdgpu_sdma_destroy_inst_ctx(adev, true); @@ -1467,10 +1476,10 @@ static int sdma_v4_4_2_sw_fini(void *handle) return 0; } -static int sdma_v4_4_2_hw_init(void *handle) +static int sdma_v4_4_2_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t inst_mask; inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); @@ -1482,9 +1491,9 @@ static int sdma_v4_4_2_hw_init(void *handle) return r; } -static int sdma_v4_4_2_hw_fini(void *handle) +static int sdma_v4_4_2_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t inst_mask; int i; @@ -1508,21 +1517,19 @@ static int sdma_v4_4_2_hw_fini(void *handle) static int sdma_v4_4_2_set_clockgating_state(void *handle, enum amd_clockgating_state state); -static int sdma_v4_4_2_suspend(void *handle) +static int sdma_v4_4_2_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_in_reset(adev)) sdma_v4_4_2_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); - return sdma_v4_4_2_hw_fini(adev); + return sdma_v4_4_2_hw_fini(ip_block); } -static int sdma_v4_4_2_resume(void *handle) +static int sdma_v4_4_2_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v4_4_2_hw_init(adev); + return sdma_v4_4_2_hw_init(ip_block); } static bool sdma_v4_4_2_is_idle(void *handle) @@ -1540,11 +1547,11 @@ static bool sdma_v4_4_2_is_idle(void *handle) return true; } -static int sdma_v4_4_2_wait_for_idle(void *handle) +static int sdma_v4_4_2_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i, j; u32 sdma[AMDGPU_MAX_SDMA_INSTANCES]; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { for (j = 0; j < adev->sdma.num_instances; j++) { @@ -1559,7 +1566,7 @@ static int sdma_v4_4_2_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v4_4_2_soft_reset(void *handle) +static int sdma_v4_4_2_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ @@ -1857,9 +1864,9 @@ 
static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags) *flags |= AMD_CG_SUPPORT_SDMA_LS; } -static void sdma_v4_4_2_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v4_4_2_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2); uint32_t instance_offset; @@ -1878,9 +1885,9 @@ static void sdma_v4_4_2_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v4_4_2_dump_ip_state(void *handle) +static void sdma_v4_4_2_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 3e48ea38385d..fa9b40934957 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -705,14 +705,16 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable) } /** - * sdma_v5_0_gfx_resume - setup and start the async dma engines + * sdma_v5_0_gfx_resume_instance - start or restart one sdma engine instance * * @adev: amdgpu_device pointer + * @i: instance index + * @restore: whether to restore the saved wptr on restart * - * Set up the gfx DMA ring buffers and enable them (NAVI10). - * Returns 0 for success, error for failure. + * Set up the gfx DMA ring buffers and enable them. On restart, the saved wptr and rptr are restored. + * Returns 0 on success, a negative error code on failure. */ -static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) +static int sdma_v5_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore) { struct amdgpu_ring *ring; u32 rb_cntl, ib_cntl; @@ -722,142 +724,163 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) u32 temp; u32 wptr_poll_cntl; u64 wptr_gpu_addr; - int i, r; - for (i = 0; i < adev->sdma.num_instances; i++) { - ring = &adev->sdma.instance[i].ring; + ring = &adev->sdma.instance[i].ring; - if (!amdgpu_sriov_vf(adev)) - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + if (!amdgpu_sriov_vf(adev)) + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); - /* Set ring buffer size in dwords */ - rb_bufsz = order_base_2(ring->ring_size / 4); - rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); + /* Set ring buffer size in dwords */ + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, - RPTR_WRITEBACK_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - - /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + + /* Initialize the ring buffer's read and write pointers 
*/ + if (restore) { + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), upper_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); + } else { WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0); WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); - - /* setup the wptr shadow polling */ - wptr_gpu_addr = ring->wptr_gpu_addr; - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), - lower_32_bits(wptr_gpu_addr)); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), - upper_32_bits(wptr_gpu_addr)); - wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, - mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); - wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, - SDMA0_GFX_RB_WPTR_POLL_CNTL, - F32_POLL_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), - wptr_poll_cntl); - - /* set the wb address whether it's enabled or not */ - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), - upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), - lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); - - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), - ring->gpu_addr >> 8); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), - ring->gpu_addr >> 40); - + } + /* setup the wptr shadow polling */ + wptr_gpu_addr = ring->wptr_gpu_addr; + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), + lower_32_bits(wptr_gpu_addr)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, + mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, + SDMA0_GFX_RB_WPTR_POLL_CNTL, + F32_POLL_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), + wptr_poll_cntl); + + /* set the wb address whether it's enabled or not */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), + upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), + lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); + + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), + ring->gpu_addr >> 8); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), + ring->gpu_addr >> 40); + + if (!restore) ring->wptr = 0; - /* before programing wptr to a less value, need set minor_ptr_update first */ - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 
1); + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); - if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), - lower_32_bits(ring->wptr << 2)); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), - upper_32_bits(ring->wptr << 2)); - } + if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), + lower_32_bits(ring->wptr << 2)); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), + upper_32_bits(ring->wptr << 2)); + } - doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); - doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, - mmSDMA0_GFX_DOORBELL_OFFSET)); + doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); + doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, + mmSDMA0_GFX_DOORBELL_OFFSET)); - if (ring->use_doorbell) { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); - doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, - OFFSET, ring->doorbell_index); - } else { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); - } - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), - doorbell_offset); + if (ring->use_doorbell) { + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); + doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); + } else { + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); + } + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), + doorbell_offset); - adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, - ring->doorbell_index, 20); + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, + ring->doorbell_index, 20); - if (amdgpu_sriov_vf(adev)) - sdma_v5_0_ring_set_wptr(ring); + if (amdgpu_sriov_vf(adev)) + sdma_v5_0_ring_set_wptr(ring); - /* set minor_ptr_update to 0 after wptr programed */ - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); + /* set minor_ptr_update to 0 after wptr programed */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); - if (!amdgpu_sriov_vf(adev)) { - /* set utc l1 enable flag always to 1 */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); - - /* enable MCBP */ - temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); - - /* Set up RESP_MODE to non-copy addresses */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); - - /* program default cache read and write policy */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); - /* clean read 
policy and write policy bits */ - temp &= 0xFF0FFF; - temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14)); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); - } + if (!amdgpu_sriov_vf(adev)) { + /* set utc l1 enable flag always to 1 */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); + + /* enable MCBP */ + temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); + + /* Set up RESP_MODE to non-copy addresses */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); + + /* program default cache read and write policy */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); + /* clean read policy and write policy bits */ + temp &= 0xFF0FFF; + temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14)); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); + } - if (!amdgpu_sriov_vf(adev)) { - /* unhalt engine */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); - } + if (!amdgpu_sriov_vf(adev)) { + /* unhalt engine */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); + } - /* enable DMA RB */ - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); #ifdef __BIG_ENDIAN - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); #endif - /* enable DMA IBs */ - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + /* enable DMA IBs */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); - if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ - sdma_v5_0_ctx_switch_enable(adev, true); - sdma_v5_0_enable(adev, true); - } + if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ + sdma_v5_0_ctx_switch_enable(adev, true); + sdma_v5_0_enable(adev, true); + } + + return amdgpu_ring_test_helper(ring); +} + +/** + * sdma_v5_0_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the gfx DMA ring buffers and enable them (NAVI10). + * Returns 0 for success, error for failure. 
+ */ +static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) +{ + int i, r; - r = amdgpu_ring_test_helper(ring); + for (i = 0; i < adev->sdma.num_instances; i++) { + r = sdma_v5_0_gfx_resume_instance(adev, i, false); if (r) return r; } @@ -1366,9 +1389,9 @@ static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); } -static int sdma_v5_0_early_init(void *handle) +static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = sdma_v5_0_init_microcode(adev); @@ -1385,11 +1408,11 @@ static int sdma_v5_0_early_init(void *handle) } -static int sdma_v5_0_sw_init(void *handle) +static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0); uint32_t *ptr; @@ -1429,6 +1452,19 @@ static int sdma_v5_0_sw_init(void *handle) return r; } + adev->sdma.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { + case IP_VERSION(5, 0, 0): + case IP_VERSION(5, 0, 2): + case IP_VERSION(5, 0, 5): + if (adev->sdma.instance[0].fw_version >= 35) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + break; + default: + break; + } + /* Allocate memory for SDMA IP Dump buffer */ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); if (ptr) @@ -1436,17 +1472,22 @@ static int sdma_v5_0_sw_init(void *handle) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + r = amdgpu_sdma_sysfs_reset_mask_init(adev); + if (r) + return r; + return r; } -static int sdma_v5_0_sw_fini(void *handle) +static int sdma_v5_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + amdgpu_sdma_sysfs_reset_mask_fini(adev); amdgpu_sdma_destroy_inst_ctx(adev, false); kfree(adev->sdma.ip_dump); @@ -1454,10 +1495,10 @@ static int sdma_v5_0_sw_fini(void *handle) return 0; } -static int sdma_v5_0_hw_init(void *handle) +static int sdma_v5_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v5_0_init_golden_registers(adev); @@ -1466,9 +1507,9 @@ static int sdma_v5_0_hw_init(void *handle) return r; } -static int sdma_v5_0_hw_fini(void *handle) +static int sdma_v5_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) return 0; @@ -1479,18 +1520,14 @@ static int sdma_v5_0_hw_fini(void *handle) return 0; } -static int sdma_v5_0_suspend(void *handle) +static int sdma_v5_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v5_0_hw_fini(adev); + return sdma_v5_0_hw_fini(ip_block); } -static int sdma_v5_0_resume(void *handle) +static int sdma_v5_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v5_0_hw_init(adev); + return sdma_v5_0_hw_init(ip_block); } 
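[Editor's note, illustration only -- not part of the patch] The supported_reset mask that sw_init now builds (amdgpu_get_soft_full_reset_mask() plus the SDMA firmware-version gate) together with the new ring-level .reset hook is what a recovery path would consult before attempting a per-queue reset instead of a full adapter reset. A minimal sketch of such a caller, using only names introduced by this series; the helper itself is hypothetical:

static int sdma_try_queue_reset(struct amdgpu_ring *ring, unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;

	/* per-queue reset is advertised only when the SDMA firmware is new enough */
	if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
		return -EOPNOTSUPP; /* caller falls back to a heavier reset */

	/* dispatches to e.g. sdma_v5_0_reset_queue() via the new .reset hook */
	return ring->funcs->reset(ring, vmid);
}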
static bool sdma_v5_0_is_idle(void *handle) @@ -1508,11 +1545,11 @@ static bool sdma_v5_0_is_idle(void *handle) return true; } -static int sdma_v5_0_wait_for_idle(void *handle) +static int sdma_v5_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 sdma0, sdma1; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG)); @@ -1525,13 +1562,100 @@ static int sdma_v5_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v5_0_soft_reset(void *handle) +static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; } +static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + int i, j, r; + u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (ring == &adev->sdma.instance[i].ring) + break; + } + + if (i == adev->sdma.num_instances) { + DRM_ERROR("sdma instance not found\n"); + return -EINVAL; + } + + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + + /* stop queue */ + ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + + rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + + /* stop the engine: set SDMAx_F32_CNTL.HALT and the SDMAx_FREEZE freeze bit to 1 */ + freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + + for (j = 0; j < adev->usec_timeout; j++) { + freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1) + break; + udelay(1); + } + + /* if the frozen state was never reported, check that the sdma copy engine is fully idle */ + if (j == adev->usec_timeout) { + stat1_reg = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG)); + if ((stat1_reg & 0x3FF) != 0x3FF) { + DRM_ERROR("cannot soft reset as sdma not idle\n"); + r = -ETIMEDOUT; + goto err0; + } + } + + f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl); + + cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); + cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl); + + /* soft reset: set SDMA_GFX_PREEMPT.IB_PREEMPT = 0, then mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */ + preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT)); + preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt); + + soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i; + + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); + + udelay(50); + + soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i); + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
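[Editor's note, not part of the patch] The double shift in "soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i" works because the per-instance SDMA soft-reset bits are adjacent in GRBM_SOFT_RESET. With an assumed SOFT_RESET_SDMA0__SHIFT of 20 (value for illustration only): i == 0 yields bit 20 (SOFT_RESET_SDMA0) and i == 1 yields bit 21 (SOFT_RESET_SDMA1), so the same expression asserts, and later clears, the reset bit of exactly the instance that owns the hung ring.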
+ + /* unfreeze */ + freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + + r = sdma_v5_0_gfx_resume_instance(adev, i, true); + +err0: + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; +} + static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring) { int i, r = 0; @@ -1778,9 +1902,9 @@ static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags) *flags |= AMD_CG_SUPPORT_SDMA_LS; } -static void sdma_v5_0_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0); uint32_t instance_offset; @@ -1799,9 +1923,9 @@ static void sdma_v5_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v5_0_dump_ip_state(void *handle) +static void sdma_v5_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0); @@ -1823,7 +1947,6 @@ static void sdma_v5_0_dump_ip_state(void *handle) static const struct amd_ip_funcs sdma_v5_0_ip_funcs = { .name = "sdma_v5_0", .early_init = sdma_v5_0_early_init, - .late_init = NULL, .sw_init = sdma_v5_0_sw_init, .sw_fini = sdma_v5_0_sw_fini, .hw_init = sdma_v5_0_hw_init, @@ -1874,6 +1997,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { .emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait, .init_cond_exec = sdma_v5_0_ring_init_cond_exec, .preempt_ib = sdma_v5_0_ring_preempt_ib, + .reset = sdma_v5_0_reset_queue, }; static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index bc9b240a3488..ba5160399ab2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -522,14 +522,17 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable) } /** - * sdma_v5_2_gfx_resume - setup and start the async dma engines + * sdma_v5_2_gfx_resume_instance - start/restart a given sdma engine * * @adev: amdgpu_device pointer + * @i: instance + * @restore: whether to restore the saved wptr and rptr on restart * - * Set up the gfx DMA ring buffers and enable them. - * Returns 0 for success, error for failure. + * Set up the gfx DMA ring buffers and enable them. On restart, the saved wptr and rptr are restored. + * Returns 0 for success, error for failure.
*/ -static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) + +static int sdma_v5_2_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore) { struct amdgpu_ring *ring; u32 rb_cntl, ib_cntl; @@ -539,139 +542,161 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) u32 temp; u32 wptr_poll_cntl; u64 wptr_gpu_addr; - int i, r; - for (i = 0; i < adev->sdma.num_instances; i++) { - ring = &adev->sdma.instance[i].ring; + ring = &adev->sdma.instance[i].ring; - if (!amdgpu_sriov_vf(adev)) - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + if (!amdgpu_sriov_vf(adev)) + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); - /* Set ring buffer size in dwords */ - rb_bufsz = order_base_2(ring->ring_size / 4); - rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); + /* Set ring buffer size in dwords */ + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, - RPTR_WRITEBACK_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - - /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + if (restore) { + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), upper_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); + } else { WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0); WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); + } - /* setup the wptr shadow polling */ - wptr_gpu_addr = ring->wptr_gpu_addr; - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), - lower_32_bits(wptr_gpu_addr)); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), - upper_32_bits(wptr_gpu_addr)); - wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, - mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); - wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, - SDMA0_GFX_RB_WPTR_POLL_CNTL, - F32_POLL_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), - wptr_poll_cntl); - - /* set the wb address whether it's enabled or not */ - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), - upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); - WREG32_SOC15_IP(GC, 
sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), - lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); - - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40); - + /* setup the wptr shadow polling */ + wptr_gpu_addr = ring->wptr_gpu_addr; + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), + lower_32_bits(wptr_gpu_addr)); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, + mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, + SDMA0_GFX_RB_WPTR_POLL_CNTL, + F32_POLL_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), + wptr_poll_cntl); + + /* set the wb address whether it's enabled or not */ + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), + upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), + lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); + + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40); + + if (!restore) ring->wptr = 0; - /* before programing wptr to a less value, need set minor_ptr_update first */ - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); - if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); - } + if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); + } - doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); - doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); + doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); + doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); - if (ring->use_doorbell) { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); - doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, - OFFSET, ring->doorbell_index); - } else { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); - } - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); + if (ring->use_doorbell) { + doorbell = 
REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); + doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); + } else { + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); + } + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); - adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, - ring->doorbell_index, - adev->doorbell_index.sdma_doorbell_range); + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, + ring->doorbell_index, + adev->doorbell_index.sdma_doorbell_range); - if (amdgpu_sriov_vf(adev)) - sdma_v5_2_ring_set_wptr(ring); + if (amdgpu_sriov_vf(adev)) + sdma_v5_2_ring_set_wptr(ring); - /* set minor_ptr_update to 0 after wptr programed */ + /* set minor_ptr_update to 0 after wptr programed */ - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); - /* SRIOV VF has no control of any of registers below */ - if (!amdgpu_sriov_vf(adev)) { - /* set utc l1 enable flag always to 1 */ - temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); - - /* enable MCBP */ - temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); - - /* Set up RESP_MODE to non-copy addresses */ - temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); - - /* program default cache read and write policy */ - temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); - /* clean read policy and write policy bits */ - temp &= 0xFF0FFF; - temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | - (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | - SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); - - /* unhalt engine */ - temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); - } + /* SRIOV VF has no control of any of registers below */ + if (!amdgpu_sriov_vf(adev)) { + /* set utc l1 enable flag always to 1 */ + temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); + + /* enable MCBP */ + temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); + + /* Set up RESP_MODE to non-copy addresses */ + temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); + + /* program default cache read and write policy */ + temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); + /* clean read policy and write policy bits */ + temp &= 0xFF0FFF; + temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | 
+ (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | + SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); + + /* unhalt engine */ + temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); + } - /* enable DMA RB */ - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); #ifdef __BIG_ENDIAN - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); #endif - /* enable DMA IBs */ - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + /* enable DMA IBs */ + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); - if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ - sdma_v5_2_ctx_switch_enable(adev, true); - sdma_v5_2_enable(adev, true); - } + if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ + sdma_v5_2_ctx_switch_enable(adev, true); + sdma_v5_2_enable(adev, true); + } + + return amdgpu_ring_test_helper(ring); +} - r = amdgpu_ring_test_helper(ring); +/** + * sdma_v5_2_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the gfx DMA ring buffers and enable them. + * Returns 0 for success, error for failure. 
+ */ +static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) +{ + int i, r; + + for (i = 0; i < adev->sdma.num_instances; i++) { + r = sdma_v5_2_gfx_resume_instance(adev, i, false); if (r) return r; } @@ -736,9 +761,9 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev) return 0; } -static int sdma_v5_2_soft_reset(void *handle) +static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 grbm_soft_reset; u32 tmp; int i; @@ -778,6 +803,7 @@ static int sdma_v5_2_soft_reset(void *handle) static int sdma_v5_2_start(struct amdgpu_device *adev) { int r = 0; + struct amdgpu_ip_block *ip_block; if (amdgpu_sriov_vf(adev)) { sdma_v5_2_ctx_switch_enable(adev, false); @@ -798,7 +824,11 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) msleep(1000); } - sdma_v5_2_soft_reset(adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SDMA); + if (!ip_block) + return -EINVAL; + + sdma_v5_2_soft_reset(ip_block); /* unhalt the MEs */ sdma_v5_2_enable(adev, true); /* enable sdma ring preemption */ @@ -1180,7 +1210,28 @@ static void sdma_v5_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring) static void sdma_v5_2_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { - amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; + uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0); + + /* Update the PD address for this VMID. */ + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + + (hub->ctx_addr_distance * vmid), + lower_32_bits(pd_addr)); + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + + (hub->ctx_addr_distance * vmid), + upper_32_bits(pd_addr)); + + /* Trigger invalidation. 
*/ + amdgpu_ring_write(ring, + SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_POLL_REGMEM) | + SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATION) | + SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(ring->vm_inv_eng) | + SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(0x1f)); + amdgpu_ring_write(ring, req); + amdgpu_ring_write(ring, 0xFFFFFFFF); + amdgpu_ring_write(ring, + SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(1 << vmid) | + SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(0x1F)); } static void sdma_v5_2_ring_emit_wreg(struct amdgpu_ring *ring, @@ -1216,9 +1267,9 @@ static void sdma_v5_2_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); } -static int sdma_v5_2_early_init(void *handle) +static int sdma_v5_2_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_sdma_init_microcode(adev, 0, true); @@ -1268,11 +1319,11 @@ static unsigned sdma_v5_2_seq_to_trap_id(int seq_num) return -EINVAL; } -static int sdma_v5_2_sw_init(void *handle) +static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2); uint32_t *ptr; @@ -1306,6 +1357,24 @@ static int sdma_v5_2_sw_init(void *handle) return r; } + adev->sdma.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { + case IP_VERSION(5, 2, 0): + case IP_VERSION(5, 2, 2): + case IP_VERSION(5, 2, 3): + case IP_VERSION(5, 2, 4): + if (adev->sdma.instance[0].fw_version >= 76) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + break; + case IP_VERSION(5, 2, 5): + if (adev->sdma.instance[0].fw_version >= 34) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + break; + default: + break; + } + /* Allocate memory for SDMA IP Dump buffer */ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); if (ptr) @@ -1313,17 +1382,22 @@ static int sdma_v5_2_sw_init(void *handle) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + r = amdgpu_sdma_sysfs_reset_mask_init(adev); + if (r) + return r; + return r; } -static int sdma_v5_2_sw_fini(void *handle) +static int sdma_v5_2_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + amdgpu_sdma_sysfs_reset_mask_fini(adev); amdgpu_sdma_destroy_inst_ctx(adev, true); kfree(adev->sdma.ip_dump); @@ -1331,16 +1405,16 @@ static int sdma_v5_2_sw_fini(void *handle) return 0; } -static int sdma_v5_2_hw_init(void *handle) +static int sdma_v5_2_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return sdma_v5_2_start(adev); } -static int sdma_v5_2_hw_fini(void *handle) +static int sdma_v5_2_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) return 0; @@ -1351,18 +1425,14 @@ static int sdma_v5_2_hw_fini(void *handle) return 0; } -static int sdma_v5_2_suspend(void *handle) +static 
int sdma_v5_2_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v5_2_hw_fini(adev); + return sdma_v5_2_hw_fini(ip_block); } -static int sdma_v5_2_resume(void *handle) +static int sdma_v5_2_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v5_2_hw_init(adev); + return sdma_v5_2_hw_init(ip_block); } static bool sdma_v5_2_is_idle(void *handle) @@ -1380,11 +1450,11 @@ static bool sdma_v5_2_is_idle(void *handle) return true; } -static int sdma_v5_2_wait_for_idle(void *handle) +static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 sdma0, sdma1, sdma2, sdma3; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { sdma0 = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG)); @@ -1399,6 +1469,96 @@ static int sdma_v5_2_wait_for_idle(void *handle) return -ETIMEDOUT; } +static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + int i, j, r; + u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (ring == &adev->sdma.instance[i].ring) + break; + } + + if (i == adev->sdma.num_instances) { + DRM_ERROR("sdma instance not found\n"); + return -EINVAL; + } + + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + + /* stop queue */ + ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + + rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + + /* stop the engine: set SDMAx_F32_CNTL.HALT and the SDMAx_FREEZE freeze bit to 1 */ + freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + + for (j = 0; j < adev->usec_timeout; j++) { + freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + + if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1) + break; + udelay(1); + } + + + if (j == adev->usec_timeout) { + stat1_reg = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG)); + if ((stat1_reg & 0x3FF) != 0x3FF) { + DRM_ERROR("cannot soft reset as sdma not idle\n"); + r = -ETIMEDOUT; + goto err0; + } + } + + f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl); + + cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); + cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl); + + /* soft reset: set SDMA_GFX_PREEMPT.IB_PREEMPT = 0, then mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */ + preempt = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT)); + preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt); + + soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); +
soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i; + + + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); + + udelay(50); + + soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i); + + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); + + /* unfreeze and unhalt */ + freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + + r = sdma_v5_2_gfx_resume_instance(adev, i, true); + +err0: + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; +} + static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring) { int i, r = 0; @@ -1736,9 +1896,9 @@ static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring) amdgpu_gfx_off_ctrl(adev, true); } -static void sdma_v5_2_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v5_2_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2); uint32_t instance_offset; @@ -1757,9 +1917,9 @@ static void sdma_v5_2_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v5_2_dump_ip_state(void *handle) +static void sdma_v5_2_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2); @@ -1781,7 +1941,6 @@ static void sdma_v5_2_dump_ip_state(void *handle) static const struct amd_ip_funcs sdma_v5_2_ip_funcs = { .name = "sdma_v5_2", .early_init = sdma_v5_2_early_init, - .late_init = NULL, .sw_init = sdma_v5_2_sw_init, .sw_fini = sdma_v5_2_sw_fini, .hw_init = sdma_v5_2_hw_init, @@ -1834,6 +1993,7 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = { .emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait, .init_cond_exec = sdma_v5_2_ring_init_cond_exec, .preempt_ib = sdma_v5_2_ring_preempt_ib, + .reset = sdma_v5_2_reset_queue, }; static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 208a1fa9d4e7..d46128b0ec92 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -469,14 +469,16 @@ static void sdma_v6_0_enable(struct amdgpu_device *adev, bool enable) } /** - * sdma_v6_0_gfx_resume - setup and start the async dma engines + * sdma_v6_0_gfx_resume_instance - start/restart a given sdma engine * * @adev: amdgpu_device pointer + * @i: instance + * @restore: whether to restore the saved wptr and rptr on restart * - * Set up the gfx DMA ring buffers and enable them. - * Returns 0 for success, error for failure. + * Set up the gfx DMA ring buffers and enable them. On restart, the saved wptr and rptr are restored. + * Returns 0 for success, error for failure.
*/ -static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) +static int sdma_v6_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore) { struct amdgpu_ring *ring; u32 rb_cntl, ib_cntl; @@ -485,132 +487,152 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) u32 doorbell_offset; u32 temp; u64 wptr_gpu_addr; - int i, r; - - for (i = 0; i < adev->sdma.num_instances; i++) { - ring = &adev->sdma.instance[i].ring; - if (!amdgpu_sriov_vf(adev)) - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + ring = &adev->sdma.instance[i].ring; + if (!amdgpu_sriov_vf(adev)) + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); - /* Set ring buffer size in dwords */ - rb_bufsz = order_base_2(ring->ring_size / 4); - rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL)); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz); + /* Set ring buffer size in dwords */ + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, - RPTR_WRITEBACK_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); - - /* Initialize the ring buffer's read and write pointers */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + if (restore) { + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), upper_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); + } else { WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0); WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0); WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0); WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0); + } + /* setup the wptr shadow polling */ + wptr_gpu_addr = ring->wptr_gpu_addr; + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO), + lower_32_bits(wptr_gpu_addr)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI), + upper_32_bits(wptr_gpu_addr)); + + /* set the wb address whether it's enabled or not */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI), + upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO), + 
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1); + + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40); + + if (!restore) + ring->wptr = 0; - /* setup the wptr shadow polling */ - wptr_gpu_addr = ring->wptr_gpu_addr; - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO), - lower_32_bits(wptr_gpu_addr)); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI), - upper_32_bits(wptr_gpu_addr)); - - /* set the wb address whether it's enabled or not */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI), - upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO), - lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); - - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1); + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40); + if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); + } - ring->wptr = 0; + doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL)); + doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET)); - /* before programing wptr to a less value, need set minor_ptr_update first */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1); + if (ring->use_doorbell) { + doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1); + doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); + } else { + doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0); + } + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset); - if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); - } + if (i == 0) + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, + ring->doorbell_index, + adev->doorbell_index.sdma_doorbell_range * 
adev->sdma.num_instances); - doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL)); - doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET)); + if (amdgpu_sriov_vf(adev)) + sdma_v6_0_ring_set_wptr(ring); + + /* set minor_ptr_update to 0 after wptr programed */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0); + + /* Set up sdma hang watchdog */ + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL)); + /* 100ms per unit */ + temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT, + max(adev->usec_timeout/100000, 1)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp); + + /* Set up RESP_MODE to non-copy addresses */ + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp); + + /* program default cache read and write policy */ + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE)); + /* clean read policy and write policy bits */ + temp &= 0xFF0FFF; + temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | + (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | + SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp); - if (ring->use_doorbell) { - doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1); - doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET, - OFFSET, ring->doorbell_index); - } else { - doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0); - } - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset); - - if (i == 0) - adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, - ring->doorbell_index, - adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances); - - if (amdgpu_sriov_vf(adev)) - sdma_v6_0_ring_set_wptr(ring); - - /* set minor_ptr_update to 0 after wptr programed */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0); - - /* Set up sdma hang watchdog */ - temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL)); - /* 100ms per unit */ - temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT, - max(adev->usec_timeout/100000, 1)); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp); - - /* Set up RESP_MODE to non-copy addresses */ - temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp); - - /* program default cache read and write policy */ - temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE)); - /* clean read policy and write policy bits */ - temp &= 0xFF0FFF; - temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | - (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | - SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, 
regSDMA0_UTCL1_PAGE), temp); - - if (!amdgpu_sriov_vf(adev)) { - /* unhalt engine */ - temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp); - } + if (!amdgpu_sriov_vf(adev)) { + /* unhalt engine */ + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp); + } - /* enable DMA RB */ - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); - ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL)); - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1); #ifdef __BIG_ENDIAN - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1); #endif - /* enable DMA IBs */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl); + /* enable DMA IBs */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl); + + if (amdgpu_sriov_vf(adev)) + sdma_v6_0_enable(adev, true); + + return amdgpu_ring_test_helper(ring); +} - if (amdgpu_sriov_vf(adev)) - sdma_v6_0_enable(adev, true); +/** + * sdma_v6_0_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the gfx DMA ring buffers and enable them. + * Returns 0 for success, error for failure. 
+ */ +static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) +{ + int i, r; - r = amdgpu_ring_test_helper(ring); + for (i = 0; i < adev->sdma.num_instances; i++) { + r = sdma_v6_0_gfx_resume_instance(adev, i, false); if (r) return r; } @@ -733,9 +755,9 @@ static int sdma_v6_0_load_microcode(struct amdgpu_device *adev) return 0; } -static int sdma_v6_0_soft_reset(void *handle) +static int sdma_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp; int i; @@ -769,9 +791,9 @@ static int sdma_v6_0_soft_reset(void *handle) return sdma_v6_0_start(adev); } -static bool sdma_v6_0_check_soft_reset(void *handle) +static bool sdma_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; long tmo = msecs_to_jiffies(1000); @@ -1272,9 +1294,9 @@ static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev) } } -static int sdma_v6_0_early_init(void *handle) +static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_sdma_init_microcode(adev, 0, true); @@ -1291,11 +1313,11 @@ static int sdma_v6_0_early_init(void *handle) return 0; } -static int sdma_v6_0_sw_init(void *handle) +static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0); uint32_t *ptr; @@ -1328,6 +1350,19 @@ static int sdma_v6_0_sw_init(void *handle) return r; } + adev->sdma.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { + case IP_VERSION(6, 0, 0): + case IP_VERSION(6, 0, 2): + case IP_VERSION(6, 0, 3): + if (adev->sdma.instance[0].fw_version >= 21) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + break; + default: + break; + } + if (amdgpu_sdma_ras_sw_init(adev)) { dev_err(adev->dev, "Failed to initialize sdma ras block!\n"); return -EINVAL; @@ -1340,17 +1375,22 @@ static int sdma_v6_0_sw_init(void *handle) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + r = amdgpu_sdma_sysfs_reset_mask_init(adev); + if (r) + return r; + return r; } -static int sdma_v6_0_sw_fini(void *handle) +static int sdma_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + amdgpu_sdma_sysfs_reset_mask_fini(adev); amdgpu_sdma_destroy_inst_ctx(adev, true); kfree(adev->sdma.ip_dump); @@ -1358,16 +1398,16 @@ static int sdma_v6_0_sw_fini(void *handle) return 0; } -static int sdma_v6_0_hw_init(void *handle) +static int sdma_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return sdma_v6_0_start(adev); } -static int sdma_v6_0_hw_fini(void *handle) +static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if 
(amdgpu_sriov_vf(adev)) return 0; @@ -1378,18 +1418,14 @@ static int sdma_v6_0_hw_fini(void *handle) return 0; } -static int sdma_v6_0_suspend(void *handle) +static int sdma_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v6_0_hw_fini(adev); + return sdma_v6_0_hw_fini(ip_block); } -static int sdma_v6_0_resume(void *handle) +static int sdma_v6_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v6_0_hw_init(adev); + return sdma_v6_0_hw_init(ip_block); } static bool sdma_v6_0_is_idle(void *handle) @@ -1407,11 +1443,11 @@ static bool sdma_v6_0_is_idle(void *handle) return true; } -static int sdma_v6_0_wait_for_idle(void *handle) +static int sdma_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 sdma0, sdma1; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { sdma0 = RREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG)); @@ -1469,6 +1505,31 @@ static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring) return r; } +static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + int i, r; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (ring == &adev->sdma.instance[i].ring) + break; + } + + if (i == adev->sdma.num_instances) { + DRM_ERROR("sdma instance not found\n"); + return -EINVAL; + } + + r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true); + if (r) + return r; + + return sdma_v6_0_gfx_resume_instance(adev, i, true); +} + static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -1556,9 +1617,9 @@ static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags) { } -static void sdma_v6_0_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v6_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0); uint32_t instance_offset; @@ -1577,9 +1638,9 @@ static void sdma_v6_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v6_0_dump_ip_state(void *handle) +static void sdma_v6_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0); @@ -1601,7 +1662,6 @@ static void sdma_v6_0_dump_ip_state(void *handle) const struct amd_ip_funcs sdma_v6_0_ip_funcs = { .name = "sdma_v6_0", .early_init = sdma_v6_0_early_init, - .late_init = NULL, .sw_init = sdma_v6_0_sw_init, .sw_fini = sdma_v6_0_sw_fini, .hw_init = sdma_v6_0_hw_init, @@ -1652,6 +1712,7 @@ static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = { .emit_reg_write_reg_wait = sdma_v6_0_ring_emit_reg_write_reg_wait, .init_cond_exec = sdma_v6_0_ring_init_cond_exec, .preempt_ib = sdma_v6_0_ring_preempt_ib, + .reset = sdma_v6_0_reset_queue, }; static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev) @@ -1726,7 +1787,7 @@ static void sdma_v6_0_emit_fill_buffer(struct amdgpu_ib *ib, uint64_t dst_offset, uint32_t byte_count) { - ib->ptr[ib->length_dw++] = 
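The sdma_v6_0_reset_queue() addition above resolves a ring back to its SDMA instance index by pointer comparison before resetting and reprogramming only that queue. A compilable sketch of the lookup pattern, with types reduced to the minimum (struct and function names are illustrative, not the kernel's):

#include <stdio.h>

#define NUM_INSTANCES 2

struct ring { int id; };

struct device {
    struct ring instance[NUM_INSTANCES];
};

/* return the instance index that owns @ring, or -1 if it is not ours */
static int ring_to_instance(struct device *dev, struct ring *ring)
{
    int i;

    for (i = 0; i < NUM_INSTANCES; i++)
        if (ring == &dev->instance[i])
            return i;
    return -1;  /* mirrors the -EINVAL "sdma instance not found" path above */
}

int main(void)
{
    struct device dev;

    printf("%d\n", ring_to_instance(&dev, &dev.instance[1])); /* prints 1 */
    return 0;
}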
SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL); + ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL); ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); ib->ptr[ib->length_dw++] = src_data; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c index a8763496aed3..d2ce6b6a7ff6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c @@ -51,6 +51,12 @@ MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin"); #define SDMA0_HYP_DEC_REG_END 0x589a #define SDMA1_HYP_DEC_REG_OFFSET 0x20 +/*define for compression field for sdma7*/ +#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_offset 0 +#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask 0x00000001 +#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift 16 +#define SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift) + static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = { SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG), SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG), @@ -747,9 +753,9 @@ static int sdma_v7_0_load_microcode(struct amdgpu_device *adev) return 0; } -static int sdma_v7_0_soft_reset(void *handle) +static int sdma_v7_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp; int i; @@ -783,9 +789,9 @@ static int sdma_v7_0_soft_reset(void *handle) return sdma_v7_0_start(adev); } -static bool sdma_v7_0_check_soft_reset(void *handle) +static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; long tmo = msecs_to_jiffies(1000); @@ -1253,9 +1259,9 @@ static void sdma_v7_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); } -static int sdma_v7_0_early_init(void *handle) +static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_sdma_init_microcode(adev, 0, true); @@ -1273,11 +1279,11 @@ static int sdma_v7_0_early_init(void *handle) return 0; } -static int sdma_v7_0_sw_init(void *handle) +static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0); uint32_t *ptr; @@ -1320,9 +1326,9 @@ static int sdma_v7_0_sw_init(void *handle) return r; } -static int sdma_v7_0_sw_fini(void *handle) +static int sdma_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -1338,16 +1344,16 @@ static int sdma_v7_0_sw_fini(void *handle) return 0; } -static int sdma_v7_0_hw_init(void *handle) +static int sdma_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return sdma_v7_0_start(adev); } -static int sdma_v7_0_hw_fini(void *handle) +static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) 
{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) return 0; @@ -1358,18 +1364,14 @@ static int sdma_v7_0_hw_fini(void *handle) return 0; } -static int sdma_v7_0_suspend(void *handle) +static int sdma_v7_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v7_0_hw_fini(adev); + return sdma_v7_0_hw_fini(ip_block); } -static int sdma_v7_0_resume(void *handle) +static int sdma_v7_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v7_0_hw_init(adev); + return sdma_v7_0_hw_init(ip_block); } static bool sdma_v7_0_is_idle(void *handle) @@ -1387,11 +1389,11 @@ static bool sdma_v7_0_is_idle(void *handle) return true; } -static int sdma_v7_0_wait_for_idle(void *handle) +static int sdma_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 sdma0, sdma1; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { sdma0 = RREG32(sdma_v7_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG)); @@ -1538,9 +1540,9 @@ static void sdma_v7_0_get_clockgating_state(void *handle, u64 *flags) { } -static void sdma_v7_0_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v7_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0); uint32_t instance_offset; @@ -1559,9 +1561,9 @@ static void sdma_v7_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v7_0_dump_ip_state(void *handle) +static void sdma_v7_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0); @@ -1724,7 +1726,8 @@ static void sdma_v7_0_emit_fill_buffer(struct amdgpu_ib *ib, uint64_t dst_offset, uint32_t byte_count) { - ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL); + ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL) | + SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(1); ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); ib->ptr[ib->length_dw++] = src_data; diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 85235470e872..00f63d3fbea7 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -2022,9 +2022,9 @@ static uint32_t si_get_rev_id(struct amdgpu_device *adev) >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; } -static int si_common_early_init(void *handle) +static int si_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->smc_rreg = &si_smc_rreg; adev->smc_wreg = &si_smc_wreg; @@ -2148,17 +2148,6 @@ static int si_common_early_init(void *handle) return 0; } -static int si_common_sw_init(void *handle) -{ - return 0; -} - -static int si_common_sw_fini(void *handle) -{ - return 0; -} - - static void si_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { @@ -2633,9 +2622,9 @@ static void 
si_fix_pci_max_read_req_size(struct amdgpu_device *adev) pcie_set_readrq(adev->pdev, 512); } -static int si_common_hw_init(void *handle) +static int si_common_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; si_fix_pci_max_read_req_size(adev); si_init_golden_registers(adev); @@ -2645,23 +2634,14 @@ static int si_common_hw_init(void *handle) return 0; } -static int si_common_hw_fini(void *handle) +static int si_common_hw_fini(struct amdgpu_ip_block *ip_block) { return 0; } -static int si_common_suspend(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_common_hw_fini(adev); -} - -static int si_common_resume(void *handle) +static int si_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_common_hw_init(adev); + return si_common_hw_init(ip_block); } static bool si_common_is_idle(void *handle) @@ -2669,16 +2649,6 @@ static bool si_common_is_idle(void *handle) return true; } -static int si_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int si_common_soft_reset(void *handle) -{ - return 0; -} - static int si_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -2694,20 +2664,12 @@ static int si_common_set_powergating_state(void *handle, static const struct amd_ip_funcs si_common_ip_funcs = { .name = "si_common", .early_init = si_common_early_init, - .late_init = NULL, - .sw_init = si_common_sw_init, - .sw_fini = si_common_sw_fini, .hw_init = si_common_hw_init, .hw_fini = si_common_hw_fini, - .suspend = si_common_suspend, .resume = si_common_resume, .is_idle = si_common_is_idle, - .wait_for_idle = si_common_wait_for_idle, - .soft_reset = si_common_soft_reset, .set_clockgating_state = si_common_set_clockgating_state, .set_powergating_state = si_common_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ip_block_version si_common_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 11db5b755832..47647a6083e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -457,9 +457,9 @@ static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -static int si_dma_early_init(void *handle) +static int si_dma_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->sdma.num_instances = 2; @@ -471,11 +471,11 @@ static int si_dma_early_init(void *handle) return 0; } -static int si_dma_sw_init(void *handle) +static int si_dma_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* DMA0 trap event */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224, @@ -506,9 +506,9 @@ static int si_dma_sw_init(void *handle) return r; } -static int si_dma_sw_fini(void *handle) +static int si_dma_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -517,39 +517,34 @@ static int si_dma_sw_fini(void *handle) return 0; } -static int si_dma_hw_init(void *handle) +static int si_dma_hw_init(struct 
amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return si_dma_start(adev); } -static int si_dma_hw_fini(void *handle) +static int si_dma_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - si_dma_stop(adev); + si_dma_stop(ip_block->adev); return 0; } -static int si_dma_suspend(void *handle) +static int si_dma_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_dma_hw_fini(adev); + return si_dma_hw_fini(ip_block); } -static int si_dma_resume(void *handle) +static int si_dma_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_dma_hw_init(adev); + return si_dma_hw_init(ip_block); } static bool si_dma_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 tmp = RREG32(SRBM_STATUS2); if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK)) @@ -558,20 +553,20 @@ static bool si_dma_is_idle(void *handle) return true; } -static int si_dma_wait_for_idle(void *handle) +static int si_dma_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (si_dma_is_idle(handle)) + if (si_dma_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int si_dma_soft_reset(void *handle) +static int si_dma_soft_reset(struct amdgpu_ip_block *ip_block) { DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n"); return 0; @@ -696,7 +691,6 @@ static int si_dma_set_powergating_state(void *handle, static const struct amd_ip_funcs si_dma_ip_funcs = { .name = "si_dma", .early_init = si_dma_early_init, - .late_init = NULL, .sw_init = si_dma_sw_init, .sw_fini = si_dma_sw_fini, .hw_init = si_dma_hw_init, @@ -708,8 +702,6 @@ static const struct amd_ip_funcs si_dma_ip_funcs = { .soft_reset = si_dma_soft_reset, .set_clockgating_state = si_dma_set_clockgating_state, .set_powergating_state = si_dma_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs si_dma_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 5237395e4fab..2ec1ebe4db11 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -156,19 +156,19 @@ static void si_ih_set_rptr(struct amdgpu_device *adev, WREG32(IH_RB_RPTR, ih->rptr); } -static int si_ih_early_init(void *handle) +static int si_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; si_ih_set_interrupt_funcs(adev); return 0; } -static int si_ih_sw_init(void *handle) +static int si_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) @@ -177,43 +177,37 @@ static int si_ih_sw_init(void *handle) return amdgpu_irq_init(adev); } -static int si_ih_sw_fini(void *handle) +static int si_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int si_ih_hw_init(void 
*handle) +static int si_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return si_ih_irq_init(adev); } -static int si_ih_hw_fini(void *handle) +static int si_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - si_ih_irq_disable(adev); + si_ih_irq_disable(ip_block->adev); return 0; } -static int si_ih_suspend(void *handle) +static int si_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_ih_hw_fini(adev); + return si_ih_hw_fini(ip_block); } -static int si_ih_resume(void *handle) +static int si_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_ih_hw_init(adev); + return si_ih_hw_init(ip_block); } static bool si_ih_is_idle(void *handle) @@ -227,22 +221,22 @@ static bool si_ih_is_idle(void *handle) return true; } -static int si_ih_wait_for_idle(void *handle) +static int si_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (si_ih_is_idle(handle)) + if (si_ih_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int si_ih_soft_reset(void *handle) +static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(SRBM_STATUS); @@ -284,7 +278,6 @@ static int si_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs si_ih_ip_funcs = { .name = "si_ih", .early_init = si_ih_early_init, - .late_init = NULL, .sw_init = si_ih_sw_init, .sw_fini = si_ih_sw_fini, .hw_init = si_ih_hw_init, @@ -296,8 +289,6 @@ static const struct amd_ip_funcs si_ih_ip_funcs = { .soft_reset = si_ih_soft_reset, .set_clockgating_state = si_ih_set_clockgating_state, .set_powergating_state = si_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs si_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c index 481217c32d85..2594467bdd87 100644 --- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c +++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c @@ -81,15 +81,9 @@ static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_SDMA)) continue; - r = adev->ip_blocks[i].version->funcs->suspend(adev); - - if (r) { - dev_err(adev->dev, - "suspend of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); + if (r) return r; - } - adev->ip_blocks[i].status.hw = false; } return 0; @@ -175,15 +169,9 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev) for (i = 0; i < adev->num_ip_blocks; i++) { if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - dev_err(adev->dev, - "resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } - - adev->ip_blocks[i].status.hw = true; } } @@ -193,15 +181,9 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev) 
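The si_dma_wait_for_idle() and si_ih_wait_for_idle() conversions above keep the same bounded-polling shape the rest of the series uses: sample a status register roughly once per microsecond, up to adev->usec_timeout attempts, then give up with -ETIMEDOUT. A self-contained sketch of that pattern, with the register read and the udelay() stubbed out:

#include <stdio.h>
#include <errno.h>

static int busy_polls_left;    /* stand-in for a busy bit in SRBM_STATUS2 */

static int hw_is_idle(void)
{
    return busy_polls_left-- <= 0;   /* pretend the engine drains after a few polls */
}

static int wait_for_idle(unsigned int usec_timeout)
{
    unsigned int i;

    for (i = 0; i < usec_timeout; i++) {
        if (hw_is_idle())
            return 0;
        /* udelay(1) in the kernel; elided in this user-space sketch */
    }
    return -ETIMEDOUT;
}

int main(void)
{
    busy_polls_left = 5;
    printf("%d\n", wait_for_idle(100000));   /* 0: engine went idle in time */
    return 0;
}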
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) continue; - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - dev_err(adev->dev, - "resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } - - adev->ip_blocks[i].status.hw = true; } for (i = 0; i < adev->num_ip_blocks; i++) { @@ -213,7 +195,7 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->funcs->late_init) { r = adev->ip_blocks[i].version->funcs->late_init( - (void *)adev); + &adev->ip_blocks[i]); if (r) { dev_err(adev->dev, "late_init of IP block <%s> failed %d after reset\n", @@ -238,6 +220,7 @@ sienna_cichlid_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl, int r; struct amdgpu_device *tmp_adev = (struct amdgpu_device *)reset_ctl->handle; + amdgpu_set_init_level(tmp_adev, AMDGPU_INIT_LEVEL_RESET_RECOVERY); dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); r = sienna_cichlid_mode2_restore_ip(tmp_adev); @@ -255,6 +238,7 @@ sienna_cichlid_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl, amdgpu_irq_gpu_reset_resume_helper(tmp_adev); + amdgpu_set_init_level(tmp_adev, AMDGPU_INIT_LEVEL_DEFAULT); r = amdgpu_ib_ring_tests(tmp_adev); if (r) { dev_err(tmp_adev->dev, diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c index 0af648931df5..70569ea906bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c +++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c @@ -80,15 +80,9 @@ static int smu_v13_0_10_mode2_suspend_ip(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_MES)) continue; - r = adev->ip_blocks[i].version->funcs->suspend(adev); - - if (r) { - dev_err(adev->dev, - "suspend of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); + if (r) return r; - } - adev->ip_blocks[i].status.hw = false; } return 0; @@ -186,15 +180,9 @@ static int smu_v13_0_10_mode2_restore_ip(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) continue; - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - dev_err(adev->dev, - "resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } - - adev->ip_blocks[i].status.hw = true; } for (i = 0; i < adev->num_ip_blocks; i++) { @@ -208,7 +196,7 @@ static int smu_v13_0_10_mode2_restore_ip(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->funcs->late_init) { r = adev->ip_blocks[i].version->funcs->late_init( - (void *)adev); + &adev->ip_blocks[i]); if (r) { dev_err(adev->dev, "late_init of IP block <%s> failed %d after reset\n", @@ -233,6 +221,7 @@ smu_v13_0_10_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl, int r; struct amdgpu_device *tmp_adev = (struct amdgpu_device *)reset_ctl->handle; + amdgpu_set_init_level(tmp_adev, AMDGPU_INIT_LEVEL_RESET_RECOVERY); dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); r = smu_v13_0_10_mode2_restore_ip(tmp_adev); @@ -246,6 +235,7 @@ smu_v13_0_10_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl, amdgpu_irq_gpu_reset_resume_helper(tmp_adev); + amdgpu_set_init_level(tmp_adev, AMDGPU_INIT_LEVEL_DEFAULT); r = amdgpu_ib_ring_tests(tmp_adev); if (r) { dev_err(tmp_adev->dev, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c index 8d16dacdc172..ede072758dab 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -90,8 +90,8 @@ static const struct amd_ip_funcs soc15_common_ip_funcs; /* Vega, Raven, Arcturus */ static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)}, }; static const struct amdgpu_video_codecs vega_video_codecs_encode = @@ -578,20 +578,16 @@ soc15_asic_reset_method(struct amdgpu_device *adev) static bool soc15_need_reset_on_resume(struct amdgpu_device *adev) { - u32 sol_reg; - - sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - /* Will reset for the following suspend abort cases. - * 1) Only reset limit on APU side, dGPU hasn't checked yet. - * 2) S3 suspend abort and TOS already launched. + * 1) Only reset on APU side, dGPU hasn't checked yet. + * 2) S3 suspend aborted in the normal S3 suspend or + * performing pm core test. */ if (adev->flags & AMD_IS_APU && adev->in_s3 && - !adev->suspend_complete && - sol_reg) + !pm_resume_via_firmware()) return true; - - return false; + else + return false; } static int soc15_asic_reset(struct amdgpu_device *adev) @@ -601,11 +597,17 @@ static int soc15_asic_reset(struct amdgpu_device *adev) * successfully. So now, temporarily enable it for the * S3 suspend abort case. */ - if (((adev->apu_flags & AMD_APU_IS_RAVEN) || - (adev->apu_flags & AMD_APU_IS_RAVEN2)) && - !soc15_need_reset_on_resume(adev)) + + if ((adev->apu_flags & AMD_APU_IS_PICASSO || + !(adev->apu_flags & AMD_APU_IS_RAVEN)) && + soc15_need_reset_on_resume(adev)) + goto asic_reset; + + if ((adev->apu_flags & AMD_APU_IS_RAVEN) || + (adev->apu_flags & AMD_APU_IS_RAVEN2)) return 0; +asic_reset: switch (soc15_asic_reset_method(adev)) { case AMD_RESET_METHOD_PCI: dev_info(adev->dev, "PCI reset\n"); @@ -829,6 +831,10 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev) if (adev->asic_type == CHIP_RENOIR) return true; + if (amdgpu_gmc_need_reset_on_init(adev)) + return true; + if (amdgpu_psp_tos_reload_needed(adev)) + return true; /* Just return false for soc15 GPUs. Reset does not seem to * be necessary. 
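The reworked entry condition in soc15_asic_reset() above is easier to follow restated as a predicate: Raven/Raven2 normally skip the ASIC reset here (per the existing comment), but a detected suspend abort, on Picasso or on anything that is not Raven, jumps to the reset path anyway; every other ASIC falls through to the reset-method switch. A sketch of the same decision as a pure function (flag names abbreviated, and the need_reset input stands in for soc15_need_reset_on_resume()):

#include <stdbool.h>
#include <stdio.h>

#define APU_IS_RAVEN   (1u << 0)
#define APU_IS_RAVEN2  (1u << 1)
#define APU_IS_PICASSO (1u << 2)

/* true when the reset-method switch below should run */
static bool should_reset(unsigned int apu_flags, bool need_reset_on_resume)
{
    if ((apu_flags & APU_IS_PICASSO || !(apu_flags & APU_IS_RAVEN)) &&
        need_reset_on_resume)
        return true;    /* the "goto asic_reset" leg */

    if (apu_flags & (APU_IS_RAVEN | APU_IS_RAVEN2))
        return false;   /* the early "return 0" leg */

    return true;        /* fall through to the reset method switch */
}

int main(void)
{
    printf("%d\n", should_reset(APU_IS_PICASSO, true));   /* 1 */
    printf("%d\n", should_reset(APU_IS_RAVEN, false));    /* 0 */
    return 0;
}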
*/ @@ -929,9 +935,9 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs = .get_reg_state = &aqua_vanjaram_get_reg_state, }; -static int soc15_common_early_init(void *handle) +static int soc15_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->nbio.funcs->set_reg_remap(adev); adev->smc_rreg = NULL; @@ -1198,9 +1204,9 @@ static int soc15_common_early_init(void *handle) return 0; } -static int soc15_common_late_init(void *handle) +static int soc15_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); @@ -1213,9 +1219,9 @@ static int soc15_common_late_init(void *handle) return 0; } -static int soc15_common_sw_init(void *handle) +static int soc15_common_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_add_irq_id(adev); @@ -1227,9 +1233,9 @@ static int soc15_common_sw_init(void *handle) return 0; } -static int soc15_common_sw_fini(void *handle) +static int soc15_common_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->df.funcs && adev->df.funcs->sw_fini) @@ -1251,9 +1257,9 @@ static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev) } } -static int soc15_common_hw_init(void *handle) +static int soc15_common_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* enable aspm */ soc15_program_aspm(adev); @@ -1280,9 +1286,9 @@ static int soc15_common_hw_init(void *handle) return 0; } -static int soc15_common_hw_fini(void *handle) +static int soc15_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* Disable the doorbell aperture and selfring doorbell aperture * separately in hw_fini because soc15_enable_doorbell_aperture @@ -1295,7 +1301,12 @@ static int soc15_common_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_put_irq(adev); + /* + * For minimal init, late_init is not called, hence RAS irqs are not + * enabled. 
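Nearly every hunk from here through the soc21/soc24, tonga_ih, and UVD files applies the same mechanical conversion: amd_ip_funcs callbacks receive a struct amdgpu_ip_block * instead of an opaque void *handle, so each callback recovers the device via ip_block->adev and the per-block state travels with the argument instead of being cast out of the handle. A reduced before/after sketch of the shape (struct members trimmed to what the pattern needs; these are not the full kernel definitions):

/* before: the core passed an opaque handle, each callback cast it back */
struct amdgpu_device;

static int hw_init_old(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    (void)adev;
    return 0;
}

/* after: the IP-block instance is explicit and carries its device */
struct amdgpu_ip_block {
    struct amdgpu_device *adev;
    /* version, status, ... trimmed */
};

static int hw_init_new(struct amdgpu_ip_block *ip_block)
{
    struct amdgpu_device *adev = ip_block->adev;

    (void)adev;
    return 0;
}

int main(void)
{
    struct amdgpu_ip_block block = { 0 };

    /* the same logical callback, invoked both ways for illustration */
    return hw_init_old(block.adev) | hw_init_new(&block);
}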
+ */ if ((!amdgpu_sriov_vf(adev)) && + (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) && adev->nbio.ras_if && amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { if (adev->nbio.ras && @@ -1309,22 +1320,20 @@ static int soc15_common_hw_fini(void *handle) return 0; } -static int soc15_common_suspend(void *handle) +static int soc15_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return soc15_common_hw_fini(adev); + return soc15_common_hw_fini(ip_block); } -static int soc15_common_resume(void *handle) +static int soc15_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (soc15_need_reset_on_resume(adev)) { dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n"); soc15_asic_reset(adev); } - return soc15_common_hw_init(adev); + return soc15_common_hw_init(ip_block); } static bool soc15_common_is_idle(void *handle) @@ -1332,16 +1341,6 @@ static bool soc15_common_is_idle(void *handle) return true; } -static int soc15_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int soc15_common_soft_reset(void *handle) -{ - return 0; -} - static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable) { uint32_t def, data; @@ -1492,11 +1491,7 @@ static const struct amd_ip_funcs soc15_common_ip_funcs = { .suspend = soc15_common_suspend, .resume = soc15_common_resume, .is_idle = soc15_common_is_idle, - .wait_for_idle = soc15_common_wait_for_idle, - .soft_reset = soc15_common_soft_reset, .set_clockgating_state = soc15_common_set_clockgating_state, .set_powergating_state = soc15_common_set_powergating_state, .get_clockgating_state= soc15_common_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index d30ad7d56def..d6999835918f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -49,13 +49,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs; /* SOC21 */ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, }; @@ -96,14 +96,14 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = { /* SRIOV SOC21, not const since data is controlled by host */ static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; static struct 
amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, }; static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = { @@ -556,9 +556,9 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs = { .update_umd_stable_pstate = &soc21_update_umd_stable_pstate, }; -static int soc21_common_early_init(void *handle) +static int soc21_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->nbio.funcs->set_reg_remap(adev); adev->smc_rreg = NULL; @@ -794,9 +794,9 @@ static int soc21_common_early_init(void *handle) return 0; } -static int soc21_common_late_init(void *handle) +static int soc21_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { xgpu_nv_mailbox_get_irq(adev); @@ -832,9 +832,9 @@ static int soc21_common_late_init(void *handle) return 0; } -static int soc21_common_sw_init(void *handle) +static int soc21_common_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_add_irq_id(adev); @@ -842,14 +842,9 @@ static int soc21_common_sw_init(void *handle) return 0; } -static int soc21_common_sw_fini(void *handle) +static int soc21_common_hw_init(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static int soc21_common_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* enable aspm */ soc21_program_aspm(adev); @@ -867,9 +862,9 @@ static int soc21_common_hw_init(void *handle) return 0; } -static int soc21_common_hw_fini(void *handle) +static int soc21_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* Disable the doorbell aperture and selfring doorbell aperture * separately in hw_fini because soc21_enable_doorbell_aperture @@ -890,11 +885,9 @@ static int soc21_common_hw_fini(void *handle) return 0; } -static int soc21_common_suspend(void *handle) +static int soc21_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return soc21_common_hw_fini(adev); + return soc21_common_hw_fini(ip_block); } static bool soc21_need_reset_on_resume(struct amdgpu_device *adev) @@ -904,9 +897,10 @@ static bool soc21_need_reset_on_resume(struct amdgpu_device *adev) /* Will reset for the following suspend abort cases. * 1) Only reset dGPU side. * 2) S3 suspend got aborted and TOS is active. + * As for dGPU suspend abort cases the SOL value + * will be kept as zero at this resume point. 
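The new comment just below explains the soc21 suspend-abort heuristic: an aborted dGPU suspend leaves the MP0 "sign of life" scratch register at zero, so soc21_need_reset_on_resume() samples it twice, 100 ms apart, to decide whether the TOS is still running (the actual comparison sits past the visible hunk). A sketch of the sampling shape, with the register access and sleep stubbed out and the decision rule assumed rather than taken from the patch:

#include <stdbool.h>
#include <stdio.h>

static unsigned int fake_sol = 0x42;   /* stand-in for MP0_SMN_C2PMSG_81 */

static unsigned int read_sol(void) { return fake_sol; }

/* assumption: a nonzero, stable sign-of-life means the TOS survived suspend */
static bool need_reset_on_resume(void)
{
    unsigned int a = read_sol();
    /* msleep(100) in the kernel; elided here */
    unsigned int b = read_sol();

    return a != 0 && a == b;   /* decision rule assumed, see lead-in */
}

int main(void)
{
    printf("%d\n", need_reset_on_resume());
    return 0;
}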
*/ - if (!(adev->flags & AMD_IS_APU) && adev->in_s3 && - !adev->suspend_complete) { + if (!(adev->flags & AMD_IS_APU) && adev->in_s3) { sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); msleep(100); sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); @@ -917,16 +911,16 @@ static bool soc21_need_reset_on_resume(struct amdgpu_device *adev) return false; } -static int soc21_common_resume(void *handle) +static int soc21_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (soc21_need_reset_on_resume(adev)) { dev_info(adev->dev, "S3 suspend aborted, resetting..."); soc21_asic_reset(adev); } - return soc21_common_hw_init(adev); + return soc21_common_hw_init(ip_block); } static bool soc21_common_is_idle(void *handle) @@ -934,16 +928,6 @@ static bool soc21_common_is_idle(void *handle) return true; } -static int soc21_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int soc21_common_soft_reset(void *handle) -{ - return 0; -} - static int soc21_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1002,17 +986,12 @@ static const struct amd_ip_funcs soc21_common_ip_funcs = { .early_init = soc21_common_early_init, .late_init = soc21_common_late_init, .sw_init = soc21_common_sw_init, - .sw_fini = soc21_common_sw_fini, .hw_init = soc21_common_hw_init, .hw_fini = soc21_common_hw_fini, .suspend = soc21_common_suspend, .resume = soc21_common_resume, .is_idle = soc21_common_is_idle, - .wait_for_idle = soc21_common_wait_for_idle, - .soft_reset = soc21_common_soft_reset, .set_clockgating_state = soc21_common_set_clockgating_state, .set_powergating_state = soc21_common_set_powergating_state, .get_clockgating_state = soc21_common_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c index fd4c3d4f8387..be96de92b2f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc24.c +++ b/drivers/gpu/drm/amd/amdgpu/soc24.c @@ -48,7 +48,7 @@ static const struct amd_ip_funcs soc24_common_ip_funcs; static const struct amdgpu_video_codec_info vcn_5_0_0_video_codecs_encode_array_vcn0[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; @@ -363,9 +363,9 @@ static const struct amdgpu_asic_funcs soc24_asic_funcs = { .update_umd_stable_pstate = &soc24_update_umd_stable_pstate, }; -static int soc24_common_early_init(void *handle) +static int soc24_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->nbio.funcs->set_reg_remap(adev); adev->smc_rreg = NULL; @@ -440,9 +440,9 @@ static int soc24_common_early_init(void *handle) return 0; } -static int soc24_common_late_init(void *handle) +static int soc24_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_get_irq(adev); @@ -455,9 +455,9 @@ static int soc24_common_late_init(void *handle) return 0; } -static int soc24_common_sw_init(void *handle) +static int soc24_common_sw_init(struct amdgpu_ip_block 
*ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_add_irq_id(adev); @@ -465,14 +465,9 @@ static int soc24_common_sw_init(void *handle) return 0; } -static int soc24_common_sw_fini(void *handle) +static int soc24_common_hw_init(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static int soc24_common_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* enable aspm */ soc24_program_aspm(adev); @@ -494,9 +489,9 @@ static int soc24_common_hw_init(void *handle) return 0; } -static int soc24_common_hw_fini(void *handle) +static int soc24_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* Disable the doorbell aperture and selfring doorbell aperture * separately in hw_fini because soc21_enable_doorbell_aperture @@ -512,18 +507,14 @@ static int soc24_common_hw_fini(void *handle) return 0; } -static int soc24_common_suspend(void *handle) +static int soc24_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return soc24_common_hw_fini(adev); + return soc24_common_hw_fini(ip_block); } -static int soc24_common_resume(void *handle) +static int soc24_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return soc24_common_hw_init(adev); + return soc24_common_hw_init(ip_block); } static bool soc24_common_is_idle(void *handle) @@ -531,16 +522,6 @@ static bool soc24_common_is_idle(void *handle) return true; } -static int soc24_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int soc24_common_soft_reset(void *handle) -{ - return 0; -} - static int soc24_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -595,14 +576,11 @@ static const struct amd_ip_funcs soc24_common_ip_funcs = { .early_init = soc24_common_early_init, .late_init = soc24_common_late_init, .sw_init = soc24_common_sw_init, - .sw_fini = soc24_common_sw_fini, .hw_init = soc24_common_hw_init, .hw_fini = soc24_common_hw_fini, .suspend = soc24_common_suspend, .resume = soc24_common_resume, .is_idle = soc24_common_is_idle, - .wait_for_idle = soc24_common_wait_for_idle, - .soft_reset = soc24_common_soft_reset, .set_clockgating_state = soc24_common_set_clockgating_state, .set_powergating_state = soc24_common_set_powergating_state, .get_clockgating_state = soc24_common_get_clockgating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index 3ac56a9645eb..21b71a427b1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -113,6 +113,14 @@ enum ta_ras_address_type { TA_RAS_PA_TO_MCA, }; +enum ta_ras_nps_mode { + TA_RAS_UNKNOWN_MODE = 0, + TA_RAS_NPS1_MODE = 1, + TA_RAS_NPS2_MODE = 2, + TA_RAS_NPS4_MODE = 4, + TA_RAS_NPS8_MODE = 8, +}; + /* Input/output structures for RAS commands */ /**********************************************************/ @@ -139,6 +147,7 @@ struct ta_ras_init_flags { uint8_t dgpu_mode; uint16_t xcc_mask; uint8_t channel_dis_num; + uint8_t nps_mode; }; struct ta_ras_mca_addr { diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 24d49d813607..5a04a6770138 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ 
b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -283,9 +283,9 @@ static void tonga_ih_set_rptr(struct amdgpu_device *adev, } } -static int tonga_ih_early_init(void *handle) +static int tonga_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -297,10 +297,10 @@ static int tonga_ih_early_init(void *handle) return 0; } -static int tonga_ih_sw_init(void *handle) +static int tonga_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, true); if (r) @@ -314,9 +314,9 @@ static int tonga_ih_sw_init(void *handle) return r; } -static int tonga_ih_sw_fini(void *handle) +static int tonga_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); @@ -324,10 +324,10 @@ static int tonga_ih_sw_fini(void *handle) return 0; } -static int tonga_ih_hw_init(void *handle) +static int tonga_ih_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = tonga_ih_irq_init(adev); if (r) @@ -336,27 +336,21 @@ static int tonga_ih_hw_init(void *handle) return 0; } -static int tonga_ih_hw_fini(void *handle) +static int tonga_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - tonga_ih_irq_disable(adev); + tonga_ih_irq_disable(ip_block->adev); return 0; } -static int tonga_ih_suspend(void *handle) +static int tonga_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return tonga_ih_hw_fini(adev); + return tonga_ih_hw_fini(ip_block); } -static int tonga_ih_resume(void *handle) +static int tonga_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return tonga_ih_hw_init(adev); + return tonga_ih_hw_init(ip_block); } static bool tonga_ih_is_idle(void *handle) @@ -370,11 +364,11 @@ static bool tonga_ih_is_idle(void *handle) return true; } -static int tonga_ih_wait_for_idle(void *handle) +static int tonga_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -386,9 +380,9 @@ static int tonga_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static bool tonga_ih_check_soft_reset(void *handle) +static bool tonga_ih_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -405,29 +399,27 @@ static bool tonga_ih_check_soft_reset(void *handle) } } -static int tonga_ih_pre_soft_reset(void *handle) +static int tonga_ih_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->irq.srbm_soft_reset) + if (!ip_block->adev->irq.srbm_soft_reset) return 0; - return tonga_ih_hw_fini(adev); + return tonga_ih_hw_fini(ip_block); } -static int 
tonga_ih_post_soft_reset(void *handle) +static int tonga_ih_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->irq.srbm_soft_reset) return 0; - return tonga_ih_hw_init(adev); + return tonga_ih_hw_init(ip_block); } -static int tonga_ih_soft_reset(void *handle) +static int tonga_ih_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset; if (!adev->irq.srbm_soft_reset) @@ -471,7 +463,6 @@ static int tonga_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs tonga_ih_ip_funcs = { .name = "tonga_ih", .early_init = tonga_ih_early_init, - .late_init = NULL, .sw_init = tonga_ih_sw_init, .sw_fini = tonga_ih_sw_fini, .hw_init = tonga_ih_hw_init, @@ -486,8 +477,6 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = { .post_soft_reset = tonga_ih_post_soft_reset, .set_clockgating_state = tonga_ih_set_clockgating_state, .set_powergating_state = tonga_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs tonga_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c index 805d6662c88b..bdbca25d80c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c @@ -531,9 +531,9 @@ static void uvd_v3_1_set_irq_funcs(struct amdgpu_device *adev) } -static int uvd_v3_1_early_init(void *handle) +static int uvd_v3_1_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->uvd.num_uvd_inst = 1; uvd_v3_1_set_ring_funcs(adev); @@ -542,10 +542,10 @@ static int uvd_v3_1_early_init(void *handle) return 0; } -static int uvd_v3_1_sw_init(void *handle) +static int uvd_v3_1_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; void *ptr; uint32_t ucode_len; @@ -580,10 +580,10 @@ static int uvd_v3_1_sw_init(void *handle) return r; } -static int uvd_v3_1_sw_fini(void *handle) +static int uvd_v3_1_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_uvd_suspend(adev); if (r) @@ -621,13 +621,13 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev, /** * uvd_v3_1_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v3_1_hw_init(void *handle) +static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -688,13 +688,13 @@ done: /** * uvd_v3_1_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
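The deletions of ".late_init = NULL" style initializers and of empty stub callbacks throughout these amd_ip_funcs tables rely on the core checking for a callback before invoking it, visible in the restore paths earlier in this series ("if (adev->ip_blocks[i].version->funcs->late_init)"): a missing member now simply means "nothing to do". A sketch of that guarded-dispatch convention, reduced to two hooks:

#include <stdio.h>

struct ip_funcs {
    int (*late_init)(void);   /* optional: may be left unset */
    int (*hw_init)(void);     /* assumed mandatory in this sketch */
};

static int my_hw_init(void) { return 0; }

/* only invoke optional hooks that the block actually provides */
static int run_init(const struct ip_funcs *f)
{
    int r = f->hw_init();

    if (r)
        return r;
    if (f->late_init)
        return f->late_init();
    return 0;   /* an absent optional hook counts as success */
}

int main(void)
{
    const struct ip_funcs f = { .hw_init = my_hw_init };

    printf("%d\n", run_init(&f));   /* 0: hw_init ran, late_init skipped */
    return 0;
}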
* * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v3_1_hw_fini(void *handle) +static int uvd_v3_1_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -704,17 +704,17 @@ static int uvd_v3_1_hw_fini(void *handle) return 0; } -static int uvd_v3_1_prepare_suspend(void *handle) +static int uvd_v3_1_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v3_1_suspend(void *handle) +static int uvd_v3_1_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -740,23 +740,22 @@ static int uvd_v3_1_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v3_1_hw_fini(adev); + r = uvd_v3_1_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v3_1_resume(void *handle) +static int uvd_v3_1_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v3_1_hw_init(adev); + return uvd_v3_1_hw_init(ip_block); } static bool uvd_v3_1_is_idle(void *handle) @@ -766,10 +765,10 @@ static bool uvd_v3_1_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v3_1_wait_for_idle(void *handle) +static int uvd_v3_1_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) @@ -778,9 +777,9 @@ static int uvd_v3_1_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int uvd_v3_1_soft_reset(void *handle) +static int uvd_v3_1_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uvd_v3_1_stop(adev); @@ -806,7 +805,6 @@ static int uvd_v3_1_set_powergating_state(void *handle, static const struct amd_ip_funcs uvd_v3_1_ip_funcs = { .name = "uvd_v3_1", .early_init = uvd_v3_1_early_init, - .late_init = NULL, .sw_init = uvd_v3_1_sw_init, .sw_fini = uvd_v3_1_sw_fini, .hw_init = uvd_v3_1_hw_init, @@ -819,8 +817,6 @@ static const struct amd_ip_funcs uvd_v3_1_ip_funcs = { .soft_reset = uvd_v3_1_soft_reset, .set_clockgating_state = uvd_v3_1_set_clockgating_state, .set_powergating_state = uvd_v3_1_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version uvd_v3_1_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 3f19c606f4de..a836dc9cfcad 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -90,9 +90,9 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); } -static int uvd_v4_2_early_init(void *handle) +static int uvd_v4_2_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; 
adev->uvd.num_uvd_inst = 1; uvd_v4_2_set_ring_funcs(adev); @@ -101,10 +101,10 @@ static int uvd_v4_2_early_init(void *handle) return 0; } -static int uvd_v4_2_sw_init(void *handle) +static int uvd_v4_2_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; /* UVD TRAP */ @@ -130,10 +130,10 @@ static int uvd_v4_2_sw_init(void *handle) return r; } -static int uvd_v4_2_sw_fini(void *handle) +static int uvd_v4_2_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_uvd_suspend(adev); if (r) @@ -147,13 +147,13 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, /** * uvd_v4_2_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v4_2_hw_init(void *handle) +static int uvd_v4_2_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -202,13 +202,13 @@ done: /** * uvd_v4_2_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v4_2_hw_fini(void *handle) +static int uvd_v4_2_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -218,17 +218,17 @@ static int uvd_v4_2_hw_fini(void *handle) return 0; } -static int uvd_v4_2_prepare_suspend(void *handle) +static int uvd_v4_2_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v4_2_suspend(void *handle) +static int uvd_v4_2_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -254,23 +254,22 @@ static int uvd_v4_2_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v4_2_hw_fini(adev); + r = uvd_v4_2_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v4_2_resume(void *handle) +static int uvd_v4_2_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v4_2_hw_init(adev); + return uvd_v4_2_hw_init(ip_block); } /** @@ -666,10 +665,10 @@ static bool uvd_v4_2_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v4_2_wait_for_idle(void *handle) +static int uvd_v4_2_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) @@ -678,9 +677,9 @@ 
static int uvd_v4_2_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int uvd_v4_2_soft_reset(void *handle) +static int uvd_v4_2_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uvd_v4_2_stop(adev); @@ -756,7 +755,6 @@ static int uvd_v4_2_set_powergating_state(void *handle, static const struct amd_ip_funcs uvd_v4_2_ip_funcs = { .name = "uvd_v4_2", .early_init = uvd_v4_2_early_init, - .late_init = NULL, .sw_init = uvd_v4_2_sw_init, .sw_fini = uvd_v4_2_sw_fini, .hw_init = uvd_v4_2_hw_init, @@ -769,8 +767,6 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = { .soft_reset = uvd_v4_2_soft_reset, .set_clockgating_state = uvd_v4_2_set_clockgating_state, .set_powergating_state = uvd_v4_2_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index efd903c21d48..ab55fae3569e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -88,9 +88,9 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); } -static int uvd_v5_0_early_init(void *handle) +static int uvd_v5_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->uvd.num_uvd_inst = 1; uvd_v5_0_set_ring_funcs(adev); @@ -99,10 +99,10 @@ static int uvd_v5_0_early_init(void *handle) return 0; } -static int uvd_v5_0_sw_init(void *handle) +static int uvd_v5_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; /* UVD TRAP */ @@ -128,10 +128,10 @@ static int uvd_v5_0_sw_init(void *handle) return r; } -static int uvd_v5_0_sw_fini(void *handle) +static int uvd_v5_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_uvd_suspend(adev); if (r) @@ -143,13 +143,13 @@ static int uvd_v5_0_sw_fini(void *handle) /** * uvd_v5_0_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v5_0_hw_init(void *handle) +static int uvd_v5_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -200,13 +200,13 @@ done: /** * uvd_v5_0_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
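
Note what the typed signature fixes in passing: the old uvd_v*_suspend() implementations called uvd_v*_hw_fini(adev) even though hw_fini() was declared to take void *handle; that only worked because the handle always happened to be the device. The compiler can now check the contract, so each suspend path above forwards its own ip_block instead. A hedged sketch of the resulting shape, with foo_* as placeholders for the per-IP handlers and the adev-level helper:

struct amdgpu_device;
struct amdgpu_ip_block { struct amdgpu_device *adev; };

static int foo_hw_fini(struct amdgpu_ip_block *ip_block)
{
        /* cancel idle work, stop the engine, ... (elided) */
        return 0;
}

/* Stands in for amdgpu_uvd_suspend(): state save stays at the adev level. */
static int foo_fw_state_suspend(struct amdgpu_device *adev)
{
        return 0;
}

static int foo_suspend(struct amdgpu_ip_block *ip_block)
{
        int r;

        r = foo_hw_fini(ip_block);      /* was foo_hw_fini(adev): legal through
                                         * void *, but never type-checked */
        if (r)
                return r;

        return foo_fw_state_suspend(ip_block->adev);
}
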
* * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v5_0_hw_fini(void *handle) +static int uvd_v5_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -216,17 +216,17 @@ static int uvd_v5_0_hw_fini(void *handle) return 0; } -static int uvd_v5_0_prepare_suspend(void *handle) +static int uvd_v5_0_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v5_0_suspend(void *handle) +static int uvd_v5_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -252,23 +252,22 @@ static int uvd_v5_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v5_0_hw_fini(adev); + r = uvd_v5_0_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v5_0_resume(void *handle) +static int uvd_v5_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v5_0_hw_init(adev); + return uvd_v5_0_hw_init(ip_block); } /** @@ -588,10 +587,10 @@ static bool uvd_v5_0_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v5_0_wait_for_idle(void *handle) +static int uvd_v5_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) @@ -600,9 +599,9 @@ static int uvd_v5_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int uvd_v5_0_soft_reset(void *handle) +static int uvd_v5_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uvd_v5_0_stop(adev); @@ -796,10 +795,15 @@ static int uvd_v5_0_set_clockgating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE); + struct amdgpu_ip_block *ip_block; + + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD); + if (!ip_block) + return -EINVAL; if (enable) { /* wait for STATUS to clear */ - if (uvd_v5_0_wait_for_idle(handle)) + if (uvd_v5_0_wait_for_idle(ip_block)) return -EBUSY; uvd_v5_0_enable_clock_gating(adev, true); @@ -863,7 +867,6 @@ out: static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { .name = "uvd_v5_0", .early_init = uvd_v5_0_early_init, - .late_init = NULL, .sw_init = uvd_v5_0_sw_init, .sw_fini = uvd_v5_0_sw_fini, .hw_init = uvd_v5_0_hw_init, @@ -877,8 +880,6 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { .set_clockgating_state = uvd_v5_0_set_clockgating_state, .set_powergating_state = uvd_v5_0_set_powergating_state, .get_clockgating_state = uvd_v5_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 495de5068455..39f8c3d3a135 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -354,9 +354,9 @@ error: return r; } -static int uvd_v6_0_early_init(void *handle) +static int uvd_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->uvd.num_uvd_inst = 1; if (!(adev->flags & AMD_IS_APU) && @@ -375,11 +375,11 @@ static int uvd_v6_0_early_init(void *handle) return 0; } -static int uvd_v6_0_sw_init(void *handle) +static int uvd_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* UVD TRAP */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq); @@ -435,10 +435,10 @@ static int uvd_v6_0_sw_init(void *handle) return r; } -static int uvd_v6_0_sw_fini(void *handle) +static int uvd_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_uvd_suspend(adev); if (r) @@ -455,13 +455,13 @@ static int uvd_v6_0_sw_fini(void *handle) /** * uvd_v6_0_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v6_0_hw_init(void *handle) +static int uvd_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int i, r; @@ -524,13 +524,13 @@ done: /** * uvd_v6_0_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
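
One asymmetry worth spotting in the uvd_v6_0 and vce hunks below: is_idle keeps its void *handle signature at this point in the series, so the converted wait_for_idle hooks stop forwarding their old handle argument and pass adev explicitly. A sketch of that polling shape; foo_* are placeholders, and RREG32, mmSRBM_STATUS, SRBM_STATUS__UVD_BUSY_MASK and usec_timeout come from the amdgpu headers, so this fragment is illustrative rather than standalone:

static bool foo_is_idle(void *handle)           /* still void * in this series */
{
        struct amdgpu_device *adev = handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int foo_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        unsigned int i;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (foo_is_idle(adev))          /* was foo_is_idle(handle) */
                        return 0;
        }

        return -ETIMEDOUT;
}
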
* * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v6_0_hw_fini(void *handle) +static int uvd_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -540,17 +540,17 @@ static int uvd_v6_0_hw_fini(void *handle) return 0; } -static int uvd_v6_0_prepare_suspend(void *handle) +static int uvd_v6_0_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v6_0_suspend(void *handle) +static int uvd_v6_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -576,23 +576,22 @@ static int uvd_v6_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v6_0_hw_fini(adev); + r = uvd_v6_0_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v6_0_resume(void *handle) +static int uvd_v6_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v6_0_hw_init(adev); + return uvd_v6_0_hw_init(ip_block); } /** @@ -1151,22 +1150,22 @@ static bool uvd_v6_0_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v6_0_wait_for_idle(void *handle) +static int uvd_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (uvd_v6_0_is_idle(handle)) + if (uvd_v6_0_is_idle(adev)) return 0; } return -ETIMEDOUT; } #define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd -static bool uvd_v6_0_check_soft_reset(void *handle) +static bool uvd_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1184,9 +1183,9 @@ static bool uvd_v6_0_check_soft_reset(void *handle) } } -static int uvd_v6_0_pre_soft_reset(void *handle) +static int uvd_v6_0_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->uvd.inst->srbm_soft_reset) return 0; @@ -1195,9 +1194,9 @@ static int uvd_v6_0_pre_soft_reset(void *handle) return 0; } -static int uvd_v6_0_soft_reset(void *handle) +static int uvd_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset; if (!adev->uvd.inst->srbm_soft_reset) @@ -1226,9 +1225,9 @@ static int uvd_v6_0_soft_reset(void *handle) return 0; } -static int uvd_v6_0_post_soft_reset(void *handle) +static int uvd_v6_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->uvd.inst->srbm_soft_reset) return 0; @@ -1455,11 +1454,16 @@ static int uvd_v6_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ip_block *ip_block; bool enable = (state == AMD_CG_STATE_GATE); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD); + if (!ip_block) + return -EINVAL; + if (enable) { /* wait for STATUS to clear */ - if (uvd_v6_0_wait_for_idle(handle)) + if (uvd_v6_0_wait_for_idle(ip_block)) return -EBUSY; uvd_v6_0_enable_clock_gating(adev, true); /* enable HW gates because UVD is idle */ @@ -1528,7 +1532,6 @@ out: static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .name = "uvd_v6_0", .early_init = uvd_v6_0_early_init, - .late_init = NULL, .sw_init = uvd_v6_0_sw_init, .sw_fini = uvd_v6_0_sw_fini, .hw_init = uvd_v6_0_hw_init, @@ -1545,8 +1548,6 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .set_clockgating_state = uvd_v6_0_set_clockgating_state, .set_powergating_state = uvd_v6_0_set_powergating_state, .get_clockgating_state = uvd_v6_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 6068b784dc69..079131aeb2f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -361,9 +361,9 @@ error: return r; } -static int uvd_v7_0_early_init(void *handle) +static int uvd_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->asic_type == CHIP_VEGA20) { u32 harvest; @@ -395,12 +395,12 @@ static int uvd_v7_0_early_init(void *handle) return 0; } -static int uvd_v7_0_sw_init(void *handle) +static int uvd_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, j, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (j = 0; j < adev->uvd.num_uvd_inst; j++) { if (adev->uvd.harvest_config & (1 << j)) @@ -487,10 +487,10 @@ static int uvd_v7_0_sw_init(void *handle) return r; } -static int uvd_v7_0_sw_fini(void *handle) +static int uvd_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i, j, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_virt_free_mm_table(adev); @@ -510,13 +510,13 @@ static int uvd_v7_0_sw_fini(void *handle) /** * uvd_v7_0_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v7_0_hw_init(void *handle) +static int uvd_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; uint32_t tmp; int i, j, r; @@ -588,13 +588,13 @@ done: /** * uvd_v7_0_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
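
set_clockgating_state is not retyped in this series, so when it needs the now-typed wait_for_idle it must look its own block up, exactly as the uvd_v5_0 and uvd_v6_0 hunks above do with amdgpu_device_ip_get_ip_block(). A sketch of that bridge; foo_* are placeholders and the gating body is elided:

static int foo_set_clockgating_state(void *handle,
                                     enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ip_block *ip_block;
        bool enable = (state == AMD_CG_STATE_GATE);

        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD);
        if (!ip_block)
                return -EINVAL;

        if (enable) {
                /* wait for STATUS to clear before gating the clocks */
                if (foo_wait_for_idle(ip_block))
                        return -EBUSY;
                /* ... enable clock gating ... */
        } else {
                /* ... disable clock gating ... */
        }

        return 0;
}
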
* * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v7_0_hw_fini(void *handle) +static int uvd_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -608,17 +608,17 @@ static int uvd_v7_0_hw_fini(void *handle) return 0; } -static int uvd_v7_0_prepare_suspend(void *handle) +static int uvd_v7_0_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v7_0_suspend(void *handle) +static int uvd_v7_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -644,23 +644,22 @@ static int uvd_v7_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v7_0_hw_fini(adev); + r = uvd_v7_0_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v7_0_resume(void *handle) +static int uvd_v7_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v7_0_hw_init(adev); + return uvd_v7_0_hw_init(ip_block); } /** @@ -1463,104 +1462,6 @@ static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -#if 0 -static bool uvd_v7_0_is_idle(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); -} - -static int uvd_v7_0_wait_for_idle(void *handle) -{ - unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - for (i = 0; i < adev->usec_timeout; i++) { - if (uvd_v7_0_is_idle(handle)) - return 0; - } - return -ETIMEDOUT; -} - -#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd -static bool uvd_v7_0_check_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 srbm_soft_reset = 0; - u32 tmp = RREG32(mmSRBM_STATUS); - - if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) || - REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) || - (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) & - AMDGPU_UVD_STATUS_BUSY_MASK)) - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, - SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); - - if (srbm_soft_reset) { - adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset; - return true; - } else { - adev->uvd.inst[ring->me].srbm_soft_reset = 0; - return false; - } -} - -static int uvd_v7_0_pre_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->uvd.inst[ring->me].srbm_soft_reset) - return 0; - - uvd_v7_0_stop(adev); - return 0; -} - -static int uvd_v7_0_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 srbm_soft_reset; - - if (!adev->uvd.inst[ring->me].srbm_soft_reset) - return 0; - srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset; - - if (srbm_soft_reset) { - u32 tmp; - - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = 
RREG32(mmSRBM_SOFT_RESET); - - /* Wait a little for things to settle down */ - udelay(50); - } - - return 0; -} - -static int uvd_v7_0_post_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->uvd.inst[ring->me].srbm_soft_reset) - return 0; - - mdelay(5); - - return uvd_v7_0_start(adev); -} -#endif - static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -1610,171 +1511,6 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev, return 0; } -#if 0 -static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev) -{ - uint32_t data, data1, data2, suvd_flags; - - data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL); - data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE); - data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL); - - data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | - UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); - - suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | - UVD_SUVD_CGC_GATE__SIT_MASK | - UVD_SUVD_CGC_GATE__SMP_MASK | - UVD_SUVD_CGC_GATE__SCM_MASK | - UVD_SUVD_CGC_GATE__SDB_MASK; - - data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK | - (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) | - (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY)); - - data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | - UVD_CGC_CTRL__UDEC_CM_MODE_MASK | - UVD_CGC_CTRL__UDEC_IT_MODE_MASK | - UVD_CGC_CTRL__UDEC_DB_MODE_MASK | - UVD_CGC_CTRL__UDEC_MP_MODE_MASK | - UVD_CGC_CTRL__SYS_MODE_MASK | - UVD_CGC_CTRL__UDEC_MODE_MASK | - UVD_CGC_CTRL__MPEG2_MODE_MASK | - UVD_CGC_CTRL__REGS_MODE_MASK | - UVD_CGC_CTRL__RBC_MODE_MASK | - UVD_CGC_CTRL__LMI_MC_MODE_MASK | - UVD_CGC_CTRL__LMI_UMC_MODE_MASK | - UVD_CGC_CTRL__IDCT_MODE_MASK | - UVD_CGC_CTRL__MPRD_MODE_MASK | - UVD_CGC_CTRL__MPC_MODE_MASK | - UVD_CGC_CTRL__LBSI_MODE_MASK | - UVD_CGC_CTRL__LRBBM_MODE_MASK | - UVD_CGC_CTRL__WCB_MODE_MASK | - UVD_CGC_CTRL__VCPU_MODE_MASK | - UVD_CGC_CTRL__JPEG_MODE_MASK | - UVD_CGC_CTRL__JPEG2_MODE_MASK | - UVD_CGC_CTRL__SCPU_MODE_MASK); - data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | - UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | - UVD_SUVD_CGC_CTRL__SMP_MODE_MASK | - UVD_SUVD_CGC_CTRL__SCM_MODE_MASK | - UVD_SUVD_CGC_CTRL__SDB_MODE_MASK); - data1 |= suvd_flags; - - WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data); - WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0); - WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1); - WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2); -} - -static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev) -{ - uint32_t data, data1, cgc_flags, suvd_flags; - - data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE); - data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE); - - cgc_flags = UVD_CGC_GATE__SYS_MASK | - UVD_CGC_GATE__UDEC_MASK | - UVD_CGC_GATE__MPEG2_MASK | - UVD_CGC_GATE__RBC_MASK | - UVD_CGC_GATE__LMI_MC_MASK | - UVD_CGC_GATE__IDCT_MASK | - UVD_CGC_GATE__MPRD_MASK | - UVD_CGC_GATE__MPC_MASK | - UVD_CGC_GATE__LBSI_MASK | - UVD_CGC_GATE__LRBBM_MASK | - UVD_CGC_GATE__UDEC_RE_MASK | - UVD_CGC_GATE__UDEC_CM_MASK | - UVD_CGC_GATE__UDEC_IT_MASK | - UVD_CGC_GATE__UDEC_DB_MASK | - UVD_CGC_GATE__UDEC_MP_MASK | - UVD_CGC_GATE__WCB_MASK | - UVD_CGC_GATE__VCPU_MASK | - UVD_CGC_GATE__SCPU_MASK | - UVD_CGC_GATE__JPEG_MASK | - UVD_CGC_GATE__JPEG2_MASK; - - suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | - UVD_SUVD_CGC_GATE__SIT_MASK | - UVD_SUVD_CGC_GATE__SMP_MASK | - UVD_SUVD_CGC_GATE__SCM_MASK | - UVD_SUVD_CGC_GATE__SDB_MASK; - - data |= cgc_flags; - data1 |= suvd_flags; - - 
WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data); - WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1); -} - -static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) -{ - u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); - - if (enable) - tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | - GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); - else - tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | - GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); - - WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); -} - - -static int uvd_v7_0_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE); - - uvd_v7_0_set_bypass_mode(adev, enable); - - if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) - return 0; - - if (enable) { - /* disable HW gating and enable Sw gating */ - uvd_v7_0_set_sw_clock_gating(adev); - } else { - /* wait for STATUS to clear */ - if (uvd_v7_0_wait_for_idle(handle)) - return -EBUSY; - - /* enable HW gates because UVD is idle */ - /* uvd_v7_0_set_hw_clock_gating(adev); */ - } - - return 0; -} - -static int uvd_v7_0_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - /* This doesn't actually powergate the UVD block. - * That's done in the dpm code via the SMC. This - * just re-inits the block as necessary. The actual - * gating still happens in the dpm code. We should - * revisit this when there is a cleaner line between - * the smc and the hw blocks - */ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) - return 0; - - WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); - - if (state == AMD_PG_STATE_GATE) { - uvd_v7_0_stop(adev); - return 0; - } else { - return uvd_v7_0_start(adev); - } -} -#endif - static int uvd_v7_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1785,7 +1521,6 @@ static int uvd_v7_0_set_clockgating_state(void *handle, const struct amd_ip_funcs uvd_v7_0_ip_funcs = { .name = "uvd_v7_0", .early_init = uvd_v7_0_early_init, - .late_init = NULL, .sw_init = uvd_v7_0_sw_init, .sw_fini = uvd_v7_0_sw_fini, .hw_init = uvd_v7_0_hw_init, @@ -1793,12 +1528,6 @@ const struct amd_ip_funcs uvd_v7_0_ip_funcs = { .prepare_suspend = uvd_v7_0_prepare_suspend, .suspend = uvd_v7_0_suspend, .resume = uvd_v7_0_resume, - .is_idle = NULL /* uvd_v7_0_is_idle */, - .wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */, - .check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */, - .pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */, - .soft_reset = NULL /* uvd_v7_0_soft_reset */, - .post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */, .set_clockgating_state = uvd_v7_0_set_clockgating_state, .set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 66fada199bda..c1ed91b39415 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -208,13 +208,13 @@ static bool vce_v2_0_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); } -static int vce_v2_0_wait_for_idle(void *handle) +static int vce_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; unsigned i; for (i = 0; i < adev->usec_timeout; i++) { - if (vce_v2_0_is_idle(handle)) + if 
(vce_v2_0_is_idle(adev)) return 0; } return -ETIMEDOUT; @@ -274,15 +274,21 @@ static int vce_v2_0_start(struct amdgpu_device *adev) static int vce_v2_0_stop(struct amdgpu_device *adev) { + struct amdgpu_ip_block *ip_block; int i; int status; + if (vce_v2_0_lmi_clean(adev)) { DRM_INFO("vce is not idle \n"); return 0; } - if (vce_v2_0_wait_for_idle(adev)) { + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCN); + if (!ip_block) + return -EINVAL; + + if (vce_v2_0_wait_for_idle(ip_block)) { DRM_INFO("VCE is busy, Can't set clock gating"); return 0; } @@ -398,9 +404,9 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable, } } -static int vce_v2_0_early_init(void *handle) +static int vce_v2_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->vce.num_rings = 2; @@ -410,11 +416,11 @@ static int vce_v2_0_early_init(void *handle) return 0; } -static int vce_v2_0_sw_init(void *handle) +static int vce_v2_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* VCE */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq); @@ -444,10 +450,10 @@ static int vce_v2_0_sw_init(void *handle) return r; } -static int vce_v2_0_sw_fini(void *handle) +static int vce_v2_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_vce_suspend(adev); if (r) @@ -456,10 +462,10 @@ static int vce_v2_0_sw_fini(void *handle) return amdgpu_vce_sw_fini(adev); } -static int vce_v2_0_hw_init(void *handle) +static int vce_v2_0_hw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_asic_set_vce_clocks(adev, 10000, 10000); vce_v2_0_enable_mgcg(adev, true, false); @@ -475,19 +481,17 @@ static int vce_v2_0_hw_init(void *handle) return 0; } -static int vce_v2_0_hw_fini(void *handle) +static int vce_v2_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - cancel_delayed_work_sync(&adev->vce.idle_work); + cancel_delayed_work_sync(&ip_block->adev->vce.idle_work); return 0; } -static int vce_v2_0_suspend(void *handle) +static int vce_v2_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* @@ -513,28 +517,27 @@ static int vce_v2_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = vce_v2_0_hw_fini(adev); + r = vce_v2_0_hw_fini(ip_block); if (r) return r; return amdgpu_vce_suspend(adev); } -static int vce_v2_0_resume(void *handle) +static int vce_v2_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vce_resume(adev); + r = amdgpu_vce_resume(ip_block->adev); if (r) return r; - return vce_v2_0_hw_init(adev); + return vce_v2_0_hw_init(ip_block); } -static int vce_v2_0_soft_reset(void *handle) +static int vce_v2_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1); mdelay(5); @@ -614,7 +617,6 @@ static int 
vce_v2_0_set_powergating_state(void *handle, static const struct amd_ip_funcs vce_v2_0_ip_funcs = { .name = "vce_v2_0", .early_init = vce_v2_0_early_init, - .late_init = NULL, .sw_init = vce_v2_0_sw_init, .sw_fini = vce_v2_0_sw_fini, .hw_init = vce_v2_0_hw_init, @@ -626,8 +628,6 @@ static const struct amd_ip_funcs vce_v2_0_ip_funcs = { .soft_reset = vce_v2_0_soft_reset, .set_clockgating_state = vce_v2_0_set_clockgating_state, .set_powergating_state = vce_v2_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 4bfba2931b08..6bb318a06f19 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -64,7 +64,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); -static int vce_v3_0_wait_for_idle(void *handle); +static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block); static int vce_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state); /** @@ -396,9 +396,9 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) } } -static int vce_v3_0_early_init(void *handle) +static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev); @@ -415,9 +415,9 @@ static int vce_v3_0_early_init(void *handle) return 0; } -static int vce_v3_0_sw_init(void *handle) +static int vce_v3_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r, i; @@ -453,10 +453,10 @@ static int vce_v3_0_sw_init(void *handle) return r; } -static int vce_v3_0_sw_fini(void *handle) +static int vce_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_vce_suspend(adev); if (r) @@ -465,10 +465,10 @@ static int vce_v3_0_sw_fini(void *handle) return amdgpu_vce_sw_fini(adev); } -static int vce_v3_0_hw_init(void *handle) +static int vce_v3_0_hw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; vce_v3_0_override_vce_clock_gating(adev, true); @@ -485,14 +485,14 @@ static int vce_v3_0_hw_init(void *handle) return 0; } -static int vce_v3_0_hw_fini(void *handle) +static int vce_v3_0_hw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vce.idle_work); - r = vce_v3_0_wait_for_idle(handle); + r = vce_v3_0_wait_for_idle(ip_block); if (r) return r; @@ -500,10 +500,10 @@ static int vce_v3_0_hw_fini(void *handle) return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE); } -static int vce_v3_0_suspend(void *handle) +static int vce_v3_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -528,23 +528,22 @@ 
static int vce_v3_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = vce_v3_0_hw_fini(adev); + r = vce_v3_0_hw_fini(ip_block); if (r) return r; return amdgpu_vce_suspend(adev); } -static int vce_v3_0_resume(void *handle) +static int vce_v3_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vce_resume(adev); + r = amdgpu_vce_resume(ip_block->adev); if (r) return r; - return vce_v3_0_hw_init(adev); + return vce_v3_0_hw_init(ip_block); } static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) @@ -609,13 +608,13 @@ static bool vce_v3_0_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS2) & mask); } -static int vce_v3_0_wait_for_idle(void *handle) +static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) - if (vce_v3_0_is_idle(handle)) + if (vce_v3_0_is_idle(adev)) return 0; return -ETIMEDOUT; @@ -627,9 +626,9 @@ static int vce_v3_0_wait_for_idle(void *handle) #define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \ VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK) -static bool vce_v3_0_check_soft_reset(void *handle) +static bool vce_v3_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; /* According to VCE team , we should use VCE_STATUS instead @@ -668,9 +667,9 @@ static bool vce_v3_0_check_soft_reset(void *handle) } } -static int vce_v3_0_soft_reset(void *handle) +static int vce_v3_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset; if (!adev->vce.srbm_soft_reset) @@ -699,29 +698,29 @@ static int vce_v3_0_soft_reset(void *handle) return 0; } -static int vce_v3_0_pre_soft_reset(void *handle) +static int vce_v3_0_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->vce.srbm_soft_reset) return 0; mdelay(5); - return vce_v3_0_suspend(adev); + return vce_v3_0_suspend(ip_block); } -static int vce_v3_0_post_soft_reset(void *handle) +static int vce_v3_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->vce.srbm_soft_reset) return 0; mdelay(5); - return vce_v3_0_resume(adev); + return vce_v3_0_resume(ip_block); } static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev, @@ -897,7 +896,6 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring) static const struct amd_ip_funcs vce_v3_0_ip_funcs = { .name = "vce_v3_0", .early_init = vce_v3_0_early_init, - .late_init = NULL, .sw_init = vce_v3_0_sw_init, .sw_fini = vce_v3_0_sw_fini, .hw_init = vce_v3_0_hw_init, @@ -913,8 +911,6 @@ static const struct amd_ip_funcs vce_v3_0_ip_funcs = { .set_clockgating_state = vce_v3_0_set_clockgating_state, .set_powergating_state = vce_v3_0_set_powergating_state, .get_clockgating_state = vce_v3_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 
index 0748bf44c880..79ee555768a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -407,9 +407,9 @@ static int vce_v4_0_stop(struct amdgpu_device *adev) return 0; } -static int vce_v4_0_early_init(void *handle) +static int vce_v4_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) /* currently only VCN0 support SRIOV */ adev->vce.num_rings = 1; @@ -422,9 +422,9 @@ static int vce_v4_0_early_init(void *handle) return 0; } -static int vce_v4_0_sw_init(void *handle) +static int vce_v4_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; unsigned size; @@ -493,10 +493,10 @@ static int vce_v4_0_sw_init(void *handle) return r; } -static int vce_v4_0_sw_fini(void *handle) +static int vce_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* free MM table */ amdgpu_virt_free_mm_table(adev); @@ -513,10 +513,10 @@ static int vce_v4_0_sw_fini(void *handle) return amdgpu_vce_sw_fini(adev); } -static int vce_v4_0_hw_init(void *handle) +static int vce_v4_0_hw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) r = vce_v4_0_sriov_start(adev); @@ -536,14 +536,14 @@ static int vce_v4_0_hw_init(void *handle) return 0; } -static int vce_v4_0_hw_fini(void *handle) +static int vce_v4_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vce.idle_work); if (!amdgpu_sriov_vf(adev)) { - /* vce_v4_0_wait_for_idle(handle); */ + /* vce_v4_0_wait_for_idle(ip_block); */ vce_v4_0_stop(adev); } else { /* full access mode, so don't touch any VCE register */ @@ -553,9 +553,9 @@ static int vce_v4_0_hw_fini(void *handle) return 0; } -static int vce_v4_0_suspend(void *handle) +static int vce_v4_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r, idx; if (adev->vce.vcpu_bo == NULL) @@ -594,16 +594,16 @@ static int vce_v4_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = vce_v4_0_hw_fini(adev); + r = vce_v4_0_hw_fini(ip_block); if (r) return r; return amdgpu_vce_suspend(adev); } -static int vce_v4_0_resume(void *handle) +static int vce_v4_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r, idx; if (adev->vce.vcpu_bo == NULL) @@ -624,7 +624,7 @@ static int vce_v4_0_resume(void *handle) return r; } - return vce_v4_0_hw_init(adev); + return vce_v4_0_hw_init(ip_block); } static void vce_v4_0_mc_resume(struct amdgpu_device *adev) @@ -691,273 +691,6 @@ static int vce_v4_0_set_clockgating_state(void *handle, return 0; } -#if 0 -static bool vce_v4_0_is_idle(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 mask = 0; - - mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK; - mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 
0 : SRBM_STATUS2__VCE1_BUSY_MASK; - - return !(RREG32(mmSRBM_STATUS2) & mask); -} - -static int vce_v4_0_wait_for_idle(void *handle) -{ - unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - for (i = 0; i < adev->usec_timeout; i++) - if (vce_v4_0_is_idle(handle)) - return 0; - - return -ETIMEDOUT; -} - -#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */ -#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */ -#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */ -#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \ - VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK) - -static bool vce_v4_0_check_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 srbm_soft_reset = 0; - - /* According to VCE team , we should use VCE_STATUS instead - * SRBM_STATUS.VCE_BUSY bit for busy status checking. - * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE - * instance's registers are accessed - * (0 for 1st instance, 10 for 2nd instance). - * - *VCE_STATUS - *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 | |FW_LOADED|JOB | - *|----+----+-----------+----+----+----+----------+---------+----| - *|bit8|bit7| bit6 |bit5|bit4|bit3| bit2 | bit1 |bit0| - * - * VCE team suggest use bit 3--bit 6 for busy status check - */ - mutex_lock(&adev->grbm_idx_mutex); - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); - if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); - } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); - if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); - } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); - mutex_unlock(&adev->grbm_idx_mutex); - - if (srbm_soft_reset) { - adev->vce.srbm_soft_reset = srbm_soft_reset; - return true; - } else { - adev->vce.srbm_soft_reset = 0; - return false; - } -} - -static int vce_v4_0_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 srbm_soft_reset; - - if (!adev->vce.srbm_soft_reset) - return 0; - srbm_soft_reset = adev->vce.srbm_soft_reset; - - if (srbm_soft_reset) { - u32 tmp; - - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - /* Wait a little for things to settle down */ - udelay(50); - } - - return 0; -} - -static int vce_v4_0_pre_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->vce.srbm_soft_reset) - return 0; - - mdelay(5); - - return vce_v4_0_suspend(adev); -} - - -static int vce_v4_0_post_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->vce.srbm_soft_reset) - return 0; - - mdelay(5); - - return vce_v4_0_resume(adev); -} - -static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) -{ - u32 tmp, data; - - tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_RB_ARB_CTRL)); - if (override) - data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; - else - data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; - - if (tmp != data) - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data); -} - -static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, - bool gated) -{ - u32 data; - - /* Set Override to disable Clock Gating */ - vce_v4_0_override_vce_clock_gating(adev, true); - - /* This function enables MGCG which is controlled by firmware. - With the clocks in the gated state the core is still - accessible but the firmware will throttle the clocks on the - fly as necessary. - */ - if (gated) { - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B)); - data |= 0x1ff; - data &= ~0xef0000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING)); - data |= 0x3ff000; - data &= ~0xffc00000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2)); - data |= 0x2; - data &= ~0x00010000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING)); - data |= 0x37f; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL)); - data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | - 0x8; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data); - } else { - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B)); - data &= ~0x80010; - data |= 0xe70008; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING)); - data |= 0xffc00000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2)); - data |= 0x10000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING)); - data &= ~0xffc00000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL)); - data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | - 0x8); - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data); - } - vce_v4_0_override_vce_clock_gating(adev, false); -} - -static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) -{ - u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); - - if (enable) - tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; - else - tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; - - WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); -} - -static int vce_v4_0_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE); - int i; - - if ((adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_TONGA) || - (adev->asic_type == CHIP_FIJI)) - vce_v4_0_set_bypass_mode(adev, enable); - - if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) - return 0; - - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < 2; i++) { - /* Program VCE Instance 0 or 1 if not harvested */ - if 
(adev->vce.harvest_config & (1 << i)) - continue; - - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); - - if (enable) { - /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ - uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A); - data &= ~(0xf | 0xff0); - data |= ((0x0 << 0) | (0x04 << 4)); - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A, data); - - /* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */ - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING); - data &= ~(0xf | 0xff0); - data |= ((0x0 << 0) | (0x04 << 4)); - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING, data); - } - - vce_v4_0_set_vce_sw_clock_gating(adev, enable); - } - - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); - mutex_unlock(&adev->grbm_idx_mutex); - - return 0; -} -#endif - static int vce_v4_0_set_powergating_state(void *handle, enum amd_powergating_state state) { @@ -1076,19 +809,12 @@ static int vce_v4_0_process_interrupt(struct amdgpu_device *adev, const struct amd_ip_funcs vce_v4_0_ip_funcs = { .name = "vce_v4_0", .early_init = vce_v4_0_early_init, - .late_init = NULL, .sw_init = vce_v4_0_sw_init, .sw_fini = vce_v4_0_sw_fini, .hw_init = vce_v4_0_hw_init, .hw_fini = vce_v4_0_hw_fini, .suspend = vce_v4_0_suspend, .resume = vce_v4_0_resume, - .is_idle = NULL /* vce_v4_0_is_idle */, - .wait_for_idle = NULL /* vce_v4_0_wait_for_idle */, - .check_soft_reset = NULL /* vce_v4_0_check_soft_reset */, - .pre_soft_reset = NULL /* vce_v4_0_pre_soft_reset */, - .soft_reset = NULL /* vce_v4_0_soft_reset */, - .post_soft_reset = NULL /* vce_v4_0_post_soft_reset */, .set_clockgating_state = vce_v4_0_set_clockgating_state, .set_powergating_state = vce_v4_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index ecdfbfefd66a..10e99c926fb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -95,14 +95,14 @@ static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring); /** * vcn_v1_0_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v1_0_early_init(void *handle) +static int vcn_v1_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->vcn.num_enc_rings = 2; @@ -110,7 +110,7 @@ static int vcn_v1_0_early_init(void *handle) vcn_v1_0_set_enc_ring_funcs(adev); vcn_v1_0_set_irq_funcs(adev); - jpeg_v1_0_early_init(handle); + jpeg_v1_0_early_init(ip_block); return amdgpu_vcn_early_init(adev); } @@ -118,17 +118,17 @@ static int vcn_v1_0_early_init(void *handle) /** * vcn_v1_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
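
VCN 1.0 drives the JPEG 1.0 block from its own init hooks, so the conversion also retypes that hand-off: the jpeg_v1_0_early_init()/_sw_init()/_sw_fini() calls now receive the forwarded ip_block instead of the raw handle, while the amdgpu_vcn_* helpers stay at the device level. Roughly, per the vcn_v1_0_early_init hunk above:

/* Illustrative shape only; ring/irq setup is elided. */
static int vcn_early_init_sketch(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        adev->vcn.num_enc_rings = 2;

        /* ... set dec/enc ring and irq function pointers ... */

        jpeg_v1_0_early_init(ip_block);         /* was jpeg_v1_0_early_init(handle) */

        return amdgpu_vcn_early_init(adev);     /* adev-level helper, unchanged */
}
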
* * Load firmware and sw initialization */ -static int vcn_v1_0_sw_init(void *handle) +static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0); uint32_t *ptr; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* VCN DEC TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, @@ -197,7 +197,7 @@ static int vcn_v1_0_sw_init(void *handle) amdgpu_vcn_fwlog_init(adev->vcn.inst); } - r = jpeg_v1_0_sw_init(handle); + r = jpeg_v1_0_sw_init(ip_block); /* Allocate memory for VCN IP Dump buffer */ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); @@ -213,20 +213,20 @@ static int vcn_v1_0_sw_init(void *handle) /** * vcn_v1_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v1_0_sw_fini(void *handle) +static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_vcn_suspend(adev); if (r) return r; - jpeg_v1_0_sw_fini(handle); + jpeg_v1_0_sw_fini(ip_block); r = amdgpu_vcn_sw_fini(adev); @@ -238,13 +238,13 @@ static int vcn_v1_0_sw_fini(void *handle) /** * vcn_v1_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v1_0_hw_init(void *handle) +static int vcn_v1_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; int i, r; @@ -268,13 +268,13 @@ static int vcn_v1_0_hw_init(void *handle) /** * vcn_v1_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v1_0_hw_fini(void *handle) +static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -290,14 +290,14 @@ static int vcn_v1_0_hw_fini(void *handle) /** * vcn_v1_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v1_0_suspend(void *handle) +static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool idle_work_unexecuted; idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -306,7 +306,7 @@ static int vcn_v1_0_suspend(void *handle) amdgpu_dpm_enable_uvd(adev, false); } - r = vcn_v1_0_hw_fini(adev); + r = vcn_v1_0_hw_fini(ip_block); if (r) return r; @@ -318,20 +318,19 @@ static int vcn_v1_0_suspend(void *handle) /** * vcn_v1_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
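
The resume side mirrors suspend: the firmware restore keeps taking the device, now fetched as ip_block->adev, and the final re-init forwards the typed block, as vcn_v1_0_resume below does. In sketch form, with foo_* as placeholder declarations rather than functions from the tree:

static int foo_fw_state_resume(struct amdgpu_device *adev);    /* placeholder for
                                                                * amdgpu_vcn_resume() */
static int foo_hw_init(struct amdgpu_ip_block *ip_block);      /* placeholder */

static int foo_resume(struct amdgpu_ip_block *ip_block)
{
        int r;

        r = foo_fw_state_resume(ip_block->adev);
        if (r)
                return r;

        return foo_hw_init(ip_block);   /* was foo_hw_init(adev) through void * */
}
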
* * Resume firmware and hw init VCN block */ -static int vcn_v1_0_resume(void *handle) +static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v1_0_hw_init(adev); + r = vcn_v1_0_hw_init(ip_block); return r; } @@ -1384,9 +1383,9 @@ static bool vcn_v1_0_is_idle(void *handle) return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE); } -static int vcn_v1_0_wait_for_idle(void *handle) +static int vcn_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, @@ -1925,9 +1924,9 @@ void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring) mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround); } -static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v1_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0); uint32_t inst_off, is_powered; @@ -1957,9 +1956,9 @@ static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v1_0_dump_ip_state(void *handle) +static void vcn_v1_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -1988,7 +1987,6 @@ static void vcn_v1_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .name = "vcn_v1_0", .early_init = vcn_v1_0_early_init, - .late_init = NULL, .sw_init = vcn_v1_0_sw_init, .sw_fini = vcn_v1_0_sw_fini, .hw_init = vcn_v1_0_hw_init, @@ -1997,10 +1995,6 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .resume = vcn_v1_0_resume, .is_idle = vcn_v1_0_is_idle, .wait_for_idle = vcn_v1_0_wait_for_idle, - .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */, - .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */, - .soft_reset = NULL /* vcn_v1_0_soft_reset */, - .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */, .set_clockgating_state = vcn_v1_0_set_clockgating_state, .set_powergating_state = vcn_v1_0_set_powergating_state, .dump_ip_state = vcn_v1_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index bfd067e2d2f1..e0322cbca3ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -100,14 +100,14 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev); /** * vcn_v2_0_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
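Note what the suspend/resume hunks above fix in passing: the old bodies forwarded adev into sibling callbacks, as in vcn_v1_0_hw_fini(adev), which only compiled because void * accepts any pointer. A small sketch of that failure mode under the new typing, with hypothetical stand-in names:

struct amdgpu_device { int unused; };
struct amdgpu_ip_block { struct amdgpu_device *adev; };

static int hw_fini_old(void *handle)
{
        (void)handle;           /* whatever arrived here was taken on faith */
        return 0;
}

static int hw_fini_new(struct amdgpu_ip_block *ip_block)
{
        (void)ip_block;
        return 0;
}

static int suspend(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        hw_fini_old(adev);      /* silently accepted: void * hides the mismatch */
        return hw_fini_new(ip_block);
        /* hw_fini_new(adev) would now be rejected at compile time */
}

int main(void)
{
        struct amdgpu_device d = { 0 };
        struct amdgpu_ip_block b = { &d };

        return suspend(&b);
}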
* * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v2_0_early_init(void *handle) +static int vcn_v2_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) adev->vcn.num_enc_rings = 1; @@ -124,17 +124,17 @@ static int vcn_v2_0_early_init(void *handle) /** * vcn_v2_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int vcn_v2_0_sw_init(void *handle) +static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0); uint32_t *ptr; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; volatile struct amdgpu_fw_shared *fw_shared; /* VCN DEC TRAP */ @@ -237,14 +237,14 @@ static int vcn_v2_0_sw_init(void *handle) /** * vcn_v2_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v2_0_sw_fini(void *handle) +static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r, idx; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -268,13 +268,13 @@ static int vcn_v2_0_sw_fini(void *handle) /** * vcn_v2_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v2_0_hw_init(void *handle) +static int vcn_v2_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; int i, r; @@ -305,13 +305,13 @@ static int vcn_v2_0_hw_init(void *handle) /** * vcn_v2_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v2_0_hw_fini(void *handle) +static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -326,20 +326,19 @@ static int vcn_v2_0_hw_fini(void *handle) /** * vcn_v2_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v2_0_suspend(void *handle) +static int vcn_v2_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v2_0_hw_fini(adev); + r = vcn_v2_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -347,20 +346,19 @@ static int vcn_v2_0_suspend(void *handle) /** * vcn_v2_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init VCN block */ -static int vcn_v2_0_resume(void *handle) +static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v2_0_hw_init(adev); + r = vcn_v2_0_hw_init(ip_block); return r; } @@ -1326,9 +1324,9 @@ static bool vcn_v2_0_is_idle(void *handle) return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE); } -static int vcn_v2_0_wait_for_idle(void *handle) +static int vcn_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, @@ -2034,9 +2032,9 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev) return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table); } -static void vcn_v2_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v2_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0); uint32_t inst_off, is_powered; @@ -2066,9 +2064,9 @@ static void vcn_v2_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v2_0_dump_ip_state(void *handle) +static void vcn_v2_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -2097,7 +2095,6 @@ static void vcn_v2_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .name = "vcn_v2_0", .early_init = vcn_v2_0_early_init, - .late_init = NULL, .sw_init = vcn_v2_0_sw_init, .sw_fini = vcn_v2_0_sw_fini, .hw_init = vcn_v2_0_hw_init, @@ -2106,10 +2103,6 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .resume = vcn_v2_0_resume, .is_idle = vcn_v2_0_is_idle, .wait_for_idle = vcn_v2_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_0_set_clockgating_state, .set_powergating_state = vcn_v2_0_set_powergating_state, .dump_ip_state = vcn_v2_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 04e9e806e318..6aa08281d094 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -110,14 +110,14 @@ static int amdgpu_ih_clientid_vcns[] = { /** * vcn_v2_5_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v2_5_early_init(void *handle) +static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { adev->vcn.num_vcn_inst = 2; @@ -151,17 +151,17 @@ static int vcn_v2_5_early_init(void *handle) /** * vcn_v2_5_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
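The suspend/resume pairs in these files also keep a deliberate symmetry: suspend stops the hardware first and then saves firmware state (hw_fini, then amdgpu_vcn_suspend), while resume restores firmware state before re-initializing the hardware (amdgpu_vcn_resume, then hw_init). A stand-in sketch of that ordering, with placeholder names for the two amdgpu_vcn helpers:

struct ip_block { int unused; };

static int hw_fini(struct ip_block *b)    { (void)b; return 0; }
static int hw_init(struct ip_block *b)    { (void)b; return 0; }
static int fw_save(struct ip_block *b)    { (void)b; return 0; } /* plays amdgpu_vcn_suspend */
static int fw_restore(struct ip_block *b) { (void)b; return 0; } /* plays amdgpu_vcn_resume */

/* Quiesce the engine before touching saved firmware state... */
static int suspend(struct ip_block *b)
{
        int r = hw_fini(b);

        return r ? r : fw_save(b);
}

/* ...and undo the two steps in the opposite order coming back. */
static int resume(struct ip_block *b)
{
        int r = fw_restore(b);

        return r ? r : hw_init(b);
}

int main(void)
{
        struct ip_block b = { 0 };

        return suspend(&b) || resume(&b);
}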
* * Load firmware and sw initialization */ -static int vcn_v2_5_sw_init(void *handle) +static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, j, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5); uint32_t *ptr; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (j = 0; j < adev->vcn.num_vcn_inst; j++) { if (adev->vcn.harvest_config & (1 << j)) @@ -295,14 +295,14 @@ static int vcn_v2_5_sw_init(void *handle) /** * vcn_v2_5_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v2_5_sw_fini(void *handle) +static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block) { int i, r, idx; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; volatile struct amdgpu_fw_shared *fw_shared; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -333,13 +333,13 @@ static int vcn_v2_5_sw_fini(void *handle) /** * vcn_v2_5_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v2_5_hw_init(void *handle) +static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, j, r = 0; @@ -381,13 +381,13 @@ static int vcn_v2_5_hw_init(void *handle) /** * vcn_v2_5_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v2_5_hw_fini(void *handle) +static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -411,20 +411,19 @@ static int vcn_v2_5_hw_fini(void *handle) /** * vcn_v2_5_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v2_5_suspend(void *handle) +static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v2_5_hw_fini(adev); + r = vcn_v2_5_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -432,20 +431,19 @@ static int vcn_v2_5_suspend(void *handle) /** * vcn_v2_5_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init VCN block */ -static int vcn_v2_5_resume(void *handle) +static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v2_5_hw_init(adev); + r = vcn_v2_5_hw_init(ip_block); return r; } @@ -1786,9 +1784,9 @@ static bool vcn_v2_5_is_idle(void *handle) return ret; } -static int vcn_v2_5_wait_for_idle(void *handle) +static int vcn_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1926,9 +1924,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v2_5_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5); uint32_t inst_off, is_powered; @@ -1958,9 +1956,9 @@ static void vcn_v2_5_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v2_5_dump_ip_state(void *handle) +static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -1989,7 +1987,6 @@ static void vcn_v2_5_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { .name = "vcn_v2_5", .early_init = vcn_v2_5_early_init, - .late_init = NULL, .sw_init = vcn_v2_5_sw_init, .sw_fini = vcn_v2_5_sw_fini, .hw_init = vcn_v2_5_hw_init, @@ -1998,10 +1995,6 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { .resume = vcn_v2_5_resume, .is_idle = vcn_v2_5_is_idle, .wait_for_idle = vcn_v2_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_5_set_clockgating_state, .set_powergating_state = vcn_v2_5_set_powergating_state, .dump_ip_state = vcn_v2_5_dump_ip_state, @@ -2011,7 +2004,6 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { static const struct amd_ip_funcs vcn_v2_6_ip_funcs = { .name = "vcn_v2_6", .early_init = vcn_v2_5_early_init, - .late_init = NULL, .sw_init = vcn_v2_5_sw_init, .sw_fini = vcn_v2_5_sw_fini, .hw_init = vcn_v2_5_hw_init, @@ -2020,10 +2012,6 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = { .resume = vcn_v2_5_resume, .is_idle = vcn_v2_5_is_idle, .wait_for_idle = vcn_v2_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_5_set_clockgating_state, .set_powergating_state = vcn_v2_5_set_powergating_state, .dump_ip_state = vcn_v2_5_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 65dd68b32280..6732ad7f16f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -116,14 +116,14 @@ static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring); /** * vcn_v3_0_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
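From VCN 2.5 on the code manages multiple instances, and the loops above all follow one pattern: skip instances masked off in harvest_config, and report idle only when every surviving instance is. A self-contained sketch of that aggregation, with a stand-in poll in place of SOC15_WAIT_ON_RREG:

#define NUM_INST 2

struct vcn { unsigned int harvest_config; /* bit i set: instance i fused off */ };

static int poll_idle(int inst)
{
        (void)inst;
        return 0;               /* stand-in: 0 when idle, negative errno otherwise */
}

static int wait_for_idle(const struct vcn *v)
{
        int i, ret = 0;

        for (i = 0; i < NUM_INST; ++i) {
                if (v->harvest_config & (1u << i))
                        continue;       /* harvested: nothing to wait on */
                ret = poll_idle(i);
                if (ret)
                        return ret;     /* first timeout ends the wait */
        }
        return ret;
}

int main(void)
{
        struct vcn v = { .harvest_config = 0x2 };       /* instance 1 fused off */

        return wait_for_idle(&v);
}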
* * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v3_0_early_init(void *handle) +static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID; @@ -153,18 +153,18 @@ static int vcn_v3_0_early_init(void *handle) /** * vcn_v3_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int vcn_v3_0_sw_init(void *handle) +static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, j, r; int vcn_doorbell_index = 0; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); uint32_t *ptr; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_vcn_sw_init(adev); if (r) @@ -299,13 +299,13 @@ static int vcn_v3_0_sw_init(void *handle) /** * vcn_v3_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v3_0_sw_fini(void *handle) +static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -338,13 +338,13 @@ static int vcn_v3_0_sw_fini(void *handle) /** * vcn_v3_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v3_0_hw_init(void *handle) +static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, j, r; @@ -413,13 +413,13 @@ static int vcn_v3_0_hw_init(void *handle) /** * vcn_v3_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v3_0_hw_fini(void *handle) +static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -443,20 +443,19 @@ static int vcn_v3_0_hw_fini(void *handle) /** * vcn_v3_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v3_0_suspend(void *handle) +static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v3_0_hw_fini(adev); + r = vcn_v3_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -464,20 +463,19 @@ static int vcn_v3_0_suspend(void *handle) /** * vcn_v3_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init VCN block */ -static int vcn_v3_0_resume(void *handle) +static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v3_0_hw_init(adev); + r = vcn_v3_0_hw_init(ip_block); return r; } @@ -2116,9 +2114,9 @@ static bool vcn_v3_0_is_idle(void *handle) return ret; } -static int vcn_v3_0_wait_for_idle(void *handle) +static int vcn_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -2251,9 +2249,9 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v3_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); uint32_t inst_off; @@ -2284,9 +2282,9 @@ static void vcn_v3_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v3_0_dump_ip_state(void *handle) +static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -2315,7 +2313,6 @@ static void vcn_v3_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v3_0_ip_funcs = { .name = "vcn_v3_0", .early_init = vcn_v3_0_early_init, - .late_init = NULL, .sw_init = vcn_v3_0_sw_init, .sw_fini = vcn_v3_0_sw_fini, .hw_init = vcn_v3_0_hw_init, @@ -2324,10 +2321,6 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = { .resume = vcn_v3_0_resume, .is_idle = vcn_v3_0_is_idle, .wait_for_idle = vcn_v3_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v3_0_set_clockgating_state, .set_powergating_state = vcn_v3_0_set_powergating_state, .dump_ip_state = vcn_v3_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 26c6f10a8c8f..fcc8511e91ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -106,14 +106,14 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev); /** * vcn_v4_0_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v4_0_early_init(void *handle) +static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; if (amdgpu_sriov_vf(adev)) { @@ -164,14 +164,14 @@ static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx) /** * vcn_v4_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Load firmware and sw initialization */ -static int vcn_v4_0_sw_init(void *handle) +static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0); uint32_t *ptr; @@ -225,6 +225,10 @@ static int vcn_v4_0_sw_init(void *handle) vcn_v4_0_fw_shared_init(adev, i); } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->vcn.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + if (amdgpu_sriov_vf(adev)) { r = amdgpu_virt_alloc_mm_table(adev); if (r) @@ -247,19 +251,23 @@ static int vcn_v4_0_sw_init(void *handle) adev->vcn.ip_dump = ptr; } + r = amdgpu_vcn_sysfs_reset_mask_init(adev); + if (r) + return r; + return 0; } /** * vcn_v4_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v4_0_sw_fini(void *handle) +static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -284,6 +292,7 @@ static int vcn_v4_0_sw_fini(void *handle) if (r) return r; + amdgpu_vcn_sysfs_reset_mask_fini(adev); r = amdgpu_vcn_sw_fini(adev); kfree(adev->vcn.ip_dump); @@ -294,13 +303,13 @@ static int vcn_v4_0_sw_fini(void *handle) /** * vcn_v4_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v4_0_hw_init(void *handle) +static int vcn_v4_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; @@ -341,13 +350,13 @@ static int vcn_v4_0_hw_init(void *handle) /** * vcn_v4_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v4_0_hw_fini(void *handle) +static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -372,20 +381,19 @@ static int vcn_v4_0_hw_fini(void *handle) /** * vcn_v4_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v4_0_suspend(void *handle) +static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v4_0_hw_fini(adev); + r = vcn_v4_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -393,20 +401,19 @@ static int vcn_v4_0_suspend(void *handle) /** * vcn_v4_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
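Beyond the signature change, this sw_init/sw_fini pair grows new bookkeeping: sw_init caches the ring's reset capabilities in adev->vcn.supported_reset via amdgpu_get_soft_full_reset_mask() (queue reset is still TODO, per the comment) and publishes them through amdgpu_vcn_sysfs_reset_mask_init(); sw_fini removes the attribute again before the rest of the teardown. A stand-in sketch of that paired lifecycle, with placeholder names for the amdgpu helpers:

struct dev {
        unsigned int supported_reset;
        int sysfs_exposed;
};

static unsigned int query_reset_mask(void)       { return 0x1; }
static int  reset_mask_sysfs_init(struct dev *d) { d->sysfs_exposed = 1; return 0; }
static void reset_mask_sysfs_fini(struct dev *d) { d->sysfs_exposed = 0; }

static int sw_init(struct dev *d)
{
        /* cache what reset modes the rings support... */
        d->supported_reset = query_reset_mask();
        /* ...and publish them last, once everything else is ready */
        return reset_mask_sysfs_init(d);
}

static void sw_fini(struct dev *d)
{
        /* remove the attribute first, mirroring the init order */
        reset_mask_sysfs_fini(d);
}

int main(void)
{
        struct dev d = { 0 };
        int r = sw_init(&d);

        sw_fini(&d);
        return r;
}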
* * Resume firmware and hw init VCN block */ -static int vcn_v4_0_resume(void *handle) +static int vcn_v4_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v4_0_hw_init(adev); + r = vcn_v4_0_hw_init(ip_block); return r; } @@ -1975,13 +1982,13 @@ static bool vcn_v4_0_is_idle(void *handle) /** * vcn_v4_0_wait_for_idle - wait for VCN block idle * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Wait for VCN block idle */ -static int vcn_v4_0_wait_for_idle(void *handle) +static int vcn_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -2158,9 +2165,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v4_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0); uint32_t inst_off, is_powered; @@ -2190,9 +2197,9 @@ static void vcn_v4_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v4_0_dump_ip_state(void *handle) +static void vcn_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -2222,7 +2229,6 @@ static void vcn_v4_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v4_0_ip_funcs = { .name = "vcn_v4_0", .early_init = vcn_v4_0_early_init, - .late_init = NULL, .sw_init = vcn_v4_0_sw_init, .sw_fini = vcn_v4_0_sw_fini, .hw_init = vcn_v4_0_hw_init, @@ -2231,10 +2237,6 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = { .resume = vcn_v4_0_resume, .is_idle = vcn_v4_0_is_idle, .wait_for_idle = vcn_v4_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_set_clockgating_state, .set_powergating_state = vcn_v4_0_set_powergating_state, .dump_ip_state = vcn_v4_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 0fda70336300..3f69b9b2bcd0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -95,16 +95,23 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring); static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev); static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev, int inst_idx, bool indirect); + +static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev) +{ + return (amdgpu_sriov_vf(adev) || + (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))); +} + /** * vcn_v4_0_3_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
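The new vcn_v4_0_3_normalizn_reqd() helper above widens an old rule: register offsets emitted into the ring had been normalized only for SR-IOV VFs, but GC 9.4.4 needs the same treatment on bare metal, so the emit helpers later in this file switch to this single predicate. A stand-in sketch of the shape (the mask below is illustrative; the real one lives in NORMALIZE_VCN_REG_OFFSET):

#include <stdbool.h>
#include <stdint.h>

#define REG_WINDOW_MASK 0x1fffu         /* stand-in, not the real macro's mask */

struct dev {
        bool sriov_vf;
        bool gc_9_4_4;
};

/* One predicate instead of an SR-IOV check at every emit site. */
static bool normalizn_reqd(const struct dev *d)
{
        return d->sriov_vf || d->gc_9_4_4;
}

static uint32_t emit_offset(const struct dev *d, uint32_t reg)
{
        if (normalizn_reqd(d))
                reg &= REG_WINDOW_MASK; /* fold into the local register window */
        return reg;
}

int main(void)
{
        struct dev d = { .sriov_vf = false, .gc_9_4_4 = true };

        return emit_offset(&d, 0x4321) == (0x4321 & REG_WINDOW_MASK) ? 0 : 1;
}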
* * Set ring and irq function pointers */ -static int vcn_v4_0_3_early_init(void *handle) +static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* re-use enc ring as unified ring */ adev->vcn.num_enc_rings = 1; @@ -116,16 +123,30 @@ static int vcn_v4_0_3_early_init(void *handle) return amdgpu_vcn_early_init(adev); } +static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx) +{ + struct amdgpu_vcn4_fw_shared *fw_shared; + + fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); + fw_shared->sq.is_enabled = 1; + + if (amdgpu_vcnfw_log) + amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]); + + return 0; +} + /** * vcn_v4_0_3_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int vcn_v4_0_3_sw_init(void *handle) +static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r, vcn_inst; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3); @@ -148,8 +169,6 @@ static int vcn_v4_0_3_sw_init(void *handle) return r; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; - vcn_inst = GET_INST(VCN, i); ring = &adev->vcn.inst[i].ring_enc[0]; @@ -172,14 +191,13 @@ static int vcn_v4_0_3_sw_init(void *handle) if (r) return r; - fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; - fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); - fw_shared->sq.is_enabled = true; - - if (amdgpu_vcnfw_log) - amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); + vcn_v4_0_3_fw_shared_init(adev, i); } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->vcn.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + if (amdgpu_sriov_vf(adev)) { r = amdgpu_virt_alloc_mm_table(adev); if (r) @@ -206,19 +224,23 @@ static int vcn_v4_0_3_sw_init(void *handle) adev->vcn.ip_dump = ptr; } + r = amdgpu_vcn_sysfs_reset_mask_init(adev); + if (r) + return r; + return 0; } /** * vcn_v4_0_3_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v4_0_3_sw_fini(void *handle) +static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(&adev->ddev, &idx)) { @@ -239,6 +261,7 @@ static int vcn_v4_0_3_sw_fini(void *handle) if (r) return r; + amdgpu_vcn_sysfs_reset_mask_fini(adev); r = amdgpu_vcn_sw_fini(adev); kfree(adev->vcn.ip_dump); @@ -249,13 +272,13 @@ static int vcn_v4_0_3_sw_fini(void *handle) /** * vcn_v4_0_3_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v4_0_3_hw_init(void *handle) +static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r, vcn_inst; @@ -273,6 +296,8 @@ static int vcn_v4_0_3_hw_init(void *handle) } } else { for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + struct amdgpu_vcn4_fw_shared *fw_shared; + vcn_inst = GET_INST(VCN, i); ring = &adev->vcn.inst[i].ring_enc[0]; @@ -296,6 +321,11 @@ static int vcn_v4_0_3_hw_init(void *handle) regVCN_RB1_DB_CTRL); } + /* Re-init fw_shared when RAS fatal error occurred */ + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + if (!fw_shared->sq.is_enabled) + vcn_v4_0_3_fw_shared_init(adev, i); + r = amdgpu_ring_test_helper(ring); if (r) return r; @@ -308,13 +338,13 @@ static int vcn_v4_0_3_hw_init(void *handle) /** * vcn_v4_0_3_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v4_0_3_hw_fini(void *handle) +static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -327,20 +357,19 @@ static int vcn_v4_0_3_hw_fini(void *handle) /** * vcn_v4_0_3_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v4_0_3_suspend(void *handle) +static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = vcn_v4_0_3_hw_fini(adev); + r = vcn_v4_0_3_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -348,20 +377,19 @@ static int vcn_v4_0_3_suspend(void *handle) /** * vcn_v4_0_3_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
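The hw_init hunk above adds a recovery path: after a RAS fatal error the firmware-shared buffer can come back cleared, so sq.is_enabled doubles as an initialized marker and a zeroed buffer is simply re-seeded through vcn_v4_0_3_fw_shared_init(). A stand-in sketch of the check, with the nested sq struct flattened for brevity:

#include <string.h>

struct fw_shared {
        unsigned int present_flag_0;
        unsigned int sq_is_enabled;     /* flattened stand-in for sq.is_enabled */
};

static void fw_shared_init(struct fw_shared *fs)
{
        fs->present_flag_0 = 1u;        /* stand-in for the unified-queue flag */
        fs->sq_is_enabled = 1;
}

static void hw_init_instance(struct fw_shared *fs)
{
        /* Zeroed (fresh, or wiped by a fatal-error reset) means re-seed. */
        if (!fs->sq_is_enabled)
                fw_shared_init(fs);
}

int main(void)
{
        struct fw_shared fs;

        memset(&fs, 0, sizeof(fs));     /* simulate the post-fatal-error state */
        hw_init_instance(&fs);
        return fs.sq_is_enabled ? 0 : 1;
}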
* * Resume firmware and hw init VCN block */ -static int vcn_v4_0_3_resume(void *handle) +static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v4_0_3_hw_init(adev); + r = vcn_v4_0_3_hw_init(ip_block); return r; } @@ -1430,8 +1458,8 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring) static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, uint32_t val, uint32_t mask) { - /* For VF, only local offsets should be used */ - if (amdgpu_sriov_vf(ring->adev)) + /* Use normalized offsets when required */ + if (vcn_v4_0_3_normalizn_reqd(ring->adev)) reg = NORMALIZE_VCN_REG_OFFSET(reg); amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT); @@ -1442,8 +1470,8 @@ static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) { - /* For VF, only local offsets should be used */ - if (amdgpu_sriov_vf(ring->adev)) + /* Use normalized offsets when required */ + if (vcn_v4_0_3_normalizn_reqd(ring->adev)) reg = NORMALIZE_VCN_REG_OFFSET(reg); amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE); @@ -1567,13 +1595,13 @@ static bool vcn_v4_0_3_is_idle(void *handle) /** * vcn_v4_0_3_wait_for_idle - wait for VCN block idle * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Wait for VCN block idle */ -static int vcn_v4_0_3_wait_for_idle(void *handle) +static int vcn_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1733,9 +1761,9 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs; } -static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3); uint32_t inst_off, is_powered; @@ -1765,9 +1793,9 @@ static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v4_0_3_dump_ip_state(void *handle) +static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off, inst_id; @@ -1798,7 +1826,6 @@ static void vcn_v4_0_3_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = { .name = "vcn_v4_0_3", .early_init = vcn_v4_0_3_early_init, - .late_init = NULL, .sw_init = vcn_v4_0_3_sw_init, .sw_fini = vcn_v4_0_3_sw_fini, .hw_init = vcn_v4_0_3_hw_init, @@ -1807,10 +1834,6 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = { .resume = vcn_v4_0_3_resume, .is_idle = vcn_v4_0_3_is_idle, .wait_for_idle = vcn_v4_0_3_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_3_set_clockgating_state, .set_powergating_state = vcn_v4_0_3_set_powergating_state, .dump_ip_state = vcn_v4_0_3_dump_ip_state, diff 
--git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 9d4f5352a62c..71961fb3f7ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -104,14 +104,14 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring); /** * vcn_v4_0_5_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v4_0_5_early_init(void *handle) +static int vcn_v4_0_5_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* re-use enc ring as unified ring */ adev->vcn.num_enc_rings = 1; @@ -124,14 +124,14 @@ static int vcn_v4_0_5_early_init(void *handle) /** * vcn_v4_0_5_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int vcn_v4_0_5_sw_init(void *handle) +static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5); uint32_t *ptr; @@ -220,13 +220,13 @@ static int vcn_v4_0_5_sw_init(void *handle) /** * vcn_v4_0_5_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v4_0_5_sw_fini(void *handle) +static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -261,13 +261,13 @@ static int vcn_v4_0_5_sw_fini(void *handle) /** * vcn_v4_0_5_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v4_0_5_hw_init(void *handle) +static int vcn_v4_0_5_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; @@ -291,13 +291,13 @@ static int vcn_v4_0_5_hw_init(void *handle) /** * vcn_v4_0_5_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v4_0_5_hw_fini(void *handle) +static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -320,20 +320,19 @@ static int vcn_v4_0_5_hw_fini(void *handle) /** * vcn_v4_0_5_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * HW fini and suspend VCN block */ -static int vcn_v4_0_5_suspend(void *handle) +static int vcn_v4_0_5_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v4_0_5_hw_fini(adev); + r = vcn_v4_0_5_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -341,20 +340,19 @@ static int vcn_v4_0_5_suspend(void *handle) /** * vcn_v4_0_5_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init VCN block */ -static int vcn_v4_0_5_resume(void *handle) +static int vcn_v4_0_5_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v4_0_5_hw_init(adev); + r = vcn_v4_0_5_hw_init(ip_block); return r; } @@ -1469,13 +1467,13 @@ static bool vcn_v4_0_5_is_idle(void *handle) /** * vcn_v4_0_5_wait_for_idle - wait for VCN block idle * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Wait for VCN block idle */ -static int vcn_v4_0_5_wait_for_idle(void *handle) +static int vcn_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1616,9 +1614,9 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v4_0_5_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5); uint32_t inst_off, is_powered; @@ -1648,9 +1646,9 @@ static void vcn_v4_0_5_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v4_0_5_dump_ip_state(void *handle) +static void vcn_v4_0_5_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -1680,7 +1678,6 @@ static void vcn_v4_0_5_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = { .name = "vcn_v4_0_5", .early_init = vcn_v4_0_5_early_init, - .late_init = NULL, .sw_init = vcn_v4_0_5_sw_init, .sw_fini = vcn_v4_0_5_sw_fini, .hw_init = vcn_v4_0_5_hw_init, @@ -1689,10 +1686,6 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = { .resume = vcn_v4_0_5_resume, .is_idle = vcn_v4_0_5_is_idle, .wait_for_idle = vcn_v4_0_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_5_set_clockgating_state, .set_powergating_state = vcn_v4_0_5_set_powergating_state, .dump_ip_state = vcn_v4_0_5_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c index c305386358b4..bd3d2bbdc16b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -87,14 +87,14 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring); /** * vcn_v5_0_0_early_init - set function pointers and load microcode * - * 
@handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v5_0_0_early_init(void *handle) +static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* re-use enc ring as unified ring */ adev->vcn.num_enc_rings = 1; @@ -108,14 +108,14 @@ static int vcn_v5_0_0_early_init(void *handle) /** * vcn_v5_0_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int vcn_v5_0_0_sw_init(void *handle) +static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0); uint32_t *ptr; @@ -170,6 +170,10 @@ static int vcn_v5_0_0_sw_init(void *handle) amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->vcn.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode; @@ -181,19 +185,24 @@ static int vcn_v5_0_0_sw_init(void *handle) } else { adev->vcn.ip_dump = ptr; } + + r = amdgpu_vcn_sysfs_reset_mask_init(adev); + if (r) + return r; + return 0; } /** * vcn_v5_0_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v5_0_0_sw_fini(void *handle) +static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -215,6 +224,7 @@ static int vcn_v5_0_0_sw_fini(void *handle) if (r) return r; + amdgpu_vcn_sysfs_reset_mask_fini(adev); r = amdgpu_vcn_sw_fini(adev); kfree(adev->vcn.ip_dump); @@ -225,13 +235,13 @@ static int vcn_v5_0_0_sw_fini(void *handle) /** * vcn_v5_0_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v5_0_0_hw_init(void *handle) +static int vcn_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; @@ -255,13 +265,13 @@ static int vcn_v5_0_0_hw_init(void *handle) /** * vcn_v5_0_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v5_0_0_hw_fini(void *handle) +static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -284,20 +294,19 @@ static int vcn_v5_0_0_hw_fini(void *handle) /** * vcn_v5_0_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v5_0_0_suspend(void *handle) +static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v5_0_0_hw_fini(adev); + r = vcn_v5_0_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -305,20 +314,19 @@ static int vcn_v5_0_0_suspend(void *handle) /** * vcn_v5_0_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init VCN block */ -static int vcn_v5_0_0_resume(void *handle) +static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v5_0_0_hw_init(adev); + r = vcn_v5_0_0_hw_init(ip_block); return r; } @@ -1196,13 +1204,13 @@ static bool vcn_v5_0_0_is_idle(void *handle) /** * vcn_v5_0_0_wait_for_idle - wait for VCN block idle * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Wait for VCN block idle */ -static int vcn_v5_0_0_wait_for_idle(void *handle) +static int vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1343,9 +1351,9 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0); uint32_t inst_off, is_powered; @@ -1375,9 +1383,9 @@ static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v5_0_dump_ip_state(void *handle) +static void vcn_v5_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -1406,7 +1414,6 @@ static void vcn_v5_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = { .name = "vcn_v5_0_0", .early_init = vcn_v5_0_0_early_init, - .late_init = NULL, .sw_init = vcn_v5_0_0_sw_init, .sw_fini = vcn_v5_0_0_sw_fini, .hw_init = vcn_v5_0_0_hw_init, @@ -1415,10 +1422,6 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = { .resume = vcn_v5_0_0_resume, .is_idle = vcn_v5_0_0_is_idle, .wait_for_idle = vcn_v5_0_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v5_0_0_set_clockgating_state, .set_powergating_state = vcn_v5_0_0_set_powergating_state, .dump_ip_state = vcn_v5_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index bf68e18e3824..0fedadd0a6a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -472,18 +472,18 @@ static void vega10_ih_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &vega10_ih_self_irq_funcs; } -static int vega10_ih_early_init(void *handle) +static int vega10_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; vega10_ih_set_interrupt_funcs(adev); vega10_ih_set_self_irq_funcs(adev); return 0; } -static int vega10_ih_sw_init(void *handle) +static int vega10_ih_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0, @@ -525,43 +525,35 @@ static int vega10_ih_sw_init(void *handle) return r; } -static int vega10_ih_sw_fini(void *handle) +static int vega10_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int vega10_ih_hw_init(void *handle) +static int vega10_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega10_ih_irq_init(adev); + return vega10_ih_irq_init(ip_block->adev); } -static int vega10_ih_hw_fini(void *handle) +static int vega10_ih_hw_fini(struct 
amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - vega10_ih_irq_disable(adev); + vega10_ih_irq_disable(ip_block->adev); return 0; } -static int vega10_ih_suspend(void *handle) +static int vega10_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega10_ih_hw_fini(adev); + return vega10_ih_hw_fini(ip_block); } -static int vega10_ih_resume(void *handle) +static int vega10_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega10_ih_hw_init(adev); + return vega10_ih_hw_init(ip_block); } static bool vega10_ih_is_idle(void *handle) @@ -570,13 +562,13 @@ static bool vega10_ih_is_idle(void *handle) return true; } -static int vega10_ih_wait_for_idle(void *handle) +static int vega10_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int vega10_ih_soft_reset(void *handle) +static int vega10_ih_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ @@ -633,7 +625,6 @@ static int vega10_ih_set_powergating_state(void *handle, const struct amd_ip_funcs vega10_ih_ip_funcs = { .name = "vega10_ih", .early_init = vega10_ih_early_init, - .late_init = NULL, .sw_init = vega10_ih_sw_init, .sw_fini = vega10_ih_sw_fini, .hw_init = vega10_ih_hw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c index ac439f0565e3..1c9aff742e43 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -114,6 +114,33 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1); + if (enable) { + /* Unset the CLEAR_OVERFLOW bit to make sure the next step + * is switching the bit from 0 to 1 + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + } + + /* Clear RB_OVERFLOW bit */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + } + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + } + /* enable_intr field is only valid in ring0 */ if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 
1 : 0)); @@ -526,18 +553,18 @@ static void vega20_ih_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &vega20_ih_self_irq_funcs; } -static int vega20_ih_early_init(void *handle) +static int vega20_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; vega20_ih_set_interrupt_funcs(adev); vega20_ih_set_self_irq_funcs(adev); return 0; } -static int vega20_ih_sw_init(void *handle) +static int vega20_ih_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr = true; int r; @@ -586,19 +613,19 @@ static int vega20_ih_sw_init(void *handle) return r; } -static int vega20_ih_sw_fini(void *handle) +static int vega20_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int vega20_ih_hw_init(void *handle) +static int vega20_ih_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = vega20_ih_irq_init(adev); if (r) @@ -607,27 +634,21 @@ static int vega20_ih_hw_init(void *handle) return 0; } -static int vega20_ih_hw_fini(void *handle) +static int vega20_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - vega20_ih_irq_disable(adev); + vega20_ih_irq_disable(ip_block->adev); return 0; } -static int vega20_ih_suspend(void *handle) +static int vega20_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega20_ih_hw_fini(adev); + return vega20_ih_hw_fini(ip_block); } -static int vega20_ih_resume(void *handle) +static int vega20_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega20_ih_hw_init(adev); + return vega20_ih_hw_init(ip_block); } static bool vega20_ih_is_idle(void *handle) @@ -636,13 +657,13 @@ static bool vega20_ih_is_idle(void *handle) return true; } -static int vega20_ih_wait_for_idle(void *handle) +static int vega20_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int vega20_ih_soft_reset(void *handle) +static int vega20_ih_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ @@ -696,7 +717,6 @@ static int vega20_ih_set_powergating_state(void *handle, const struct amd_ip_funcs vega20_ih_ip_funcs = { .name = "vega20_ih", .early_init = vega20_ih_early_init, - .late_init = NULL, .sw_init = vega20_ih_sw_init, .sw_fini = vega20_ih_sw_fini, .hw_init = vega20_ih_hw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index d39c670f6220..a83505815d39 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -136,15 +136,15 @@ static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[] { .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, .max_width = 4096, - .max_height = 2304, - .max_pixels_per_frame = 4096 * 2304, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, .max_level = 0, }, { .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, .max_width = 4096, - .max_height = 2304, - .max_pixels_per_frame = 4096 * 2304, + .max_height = 4096, + .max_pixels_per_frame = 
4096 * 4096, .max_level = 0, }, }; @@ -1455,9 +1455,9 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = #define CZ_REV_BRISTOL(rev) \ ((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6)) -static int vi_common_early_init(void *handle) +static int vi_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->flags & AMD_IS_APU) { adev->smc_rreg = &cz_smc_rreg; @@ -1679,9 +1679,9 @@ static int vi_common_early_init(void *handle) return 0; } -static int vi_common_late_init(void *handle) +static int vi_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_vi_mailbox_get_irq(adev); @@ -1689,9 +1689,9 @@ static int vi_common_late_init(void *handle) return 0; } -static int vi_common_sw_init(void *handle) +static int vi_common_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_vi_mailbox_add_irq_id(adev); @@ -1699,14 +1699,9 @@ static int vi_common_sw_init(void *handle) return 0; } -static int vi_common_sw_fini(void *handle) +static int vi_common_hw_init(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static int vi_common_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* move the golden regs per IP block */ vi_init_golden_registers(adev); @@ -1718,9 +1713,9 @@ static int vi_common_hw_init(void *handle) return 0; } -static int vi_common_hw_fini(void *handle) +static int vi_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* enable the doorbell aperture */ vi_enable_doorbell_aperture(adev, false); @@ -1731,18 +1726,14 @@ static int vi_common_hw_fini(void *handle) return 0; } -static int vi_common_suspend(void *handle) +static int vi_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vi_common_hw_fini(adev); + return vi_common_hw_fini(ip_block); } -static int vi_common_resume(void *handle) +static int vi_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vi_common_hw_init(adev); + return vi_common_hw_init(ip_block); } static bool vi_common_is_idle(void *handle) @@ -1750,16 +1741,6 @@ static bool vi_common_is_idle(void *handle) return true; } -static int vi_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int vi_common_soft_reset(void *handle) -{ - return 0; -} - static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable) { @@ -2047,19 +2028,14 @@ static const struct amd_ip_funcs vi_common_ip_funcs = { .early_init = vi_common_early_init, .late_init = vi_common_late_init, .sw_init = vi_common_sw_init, - .sw_fini = vi_common_sw_fini, .hw_init = vi_common_hw_init, .hw_fini = vi_common_hw_fini, .suspend = vi_common_suspend, .resume = vi_common_resume, .is_idle = vi_common_is_idle, - .wait_for_idle = vi_common_wait_for_idle, - .soft_reset = vi_common_soft_reset, .set_clockgating_state = vi_common_set_clockgating_state, .set_powergating_state = vi_common_set_powergating_state, .get_clockgating_state = 
vi_common_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ip_block_version vi_common_ip_block = diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 9044bdb38cf4..065d87841459 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -365,7 +365,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, p->pasid, dev->id); - err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, + err = pqm_create_queue(&p->pqm, dev, &q_properties, &queue_id, NULL, NULL, NULL, &doorbell_offset_in_process); if (err != 0) goto err_create_queue; @@ -1148,7 +1148,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM) size >>= 1; - WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size)); + atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage); } mutex_unlock(&p->mutex); @@ -1219,7 +1219,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep, kfd_process_device_remove_obj_handle( pdd, GET_IDR_HANDLE(args->handle)); - WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size); + atomic64_sub(size, &pdd->vram_usage); err_unlock: err_pdd: @@ -2347,7 +2347,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd, } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { bo_bucket->restored_offset = offset; /* Update the VRAM usage count */ - WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size); + atomic64_add(bo_bucket->size, &pdd->vram_usage); } return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 48caecf7e72e..7b826a136ceb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -28,6 +28,7 @@ #include "kfd_topology.h" #include "amdgpu.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_xgmi.h" /* GPU Processor ID base for dGPUs for which VCRAT needs to be created. * GPU processor ID are expressed with Bit[31]=1. 
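A note on the vram_usage hunks in kfd_chardev.c above: the field moves from a plain uint64_t updated with WRITE_ONCE()/READ_ONCE() to an atomic64_t. WRITE_ONCE() only keeps the individual store from tearing; the read-modify-write "pdd->vram_usage + PAGE_ALIGN(size)" can still lose an update when two threads allocate or free concurrently. A minimal userspace C11 analogue of the before/after (pdd_stats and the function names are illustrative, not kernel symbols):

#include <stdatomic.h>
#include <stdint.h>

struct pdd_stats {
	atomic_uint_fast64_t vram_usage;	/* was: uint64_t + WRITE_ONCE() */
};

/* Racy old shape: two concurrent callers may both read the same old
 * value, so one increment is silently lost:
 *	s->vram_usage = s->vram_usage + size;
 */

/* Lock-free shape, mirroring atomic64_add()/atomic64_sub(): */
static void vram_account_alloc(struct pdd_stats *s, uint64_t size)
{
	atomic_fetch_add_explicit(&s->vram_usage, size, memory_order_relaxed);
}

static void vram_account_free(struct pdd_stats *s, uint64_t size)
{
	atomic_fetch_sub_explicit(&s->vram_usage, size, memory_order_relaxed);
}

/* Readers, cf. atomic64_read() in kfd_procfs_show(): */
static uint64_t vram_usage_read(struct pdd_stats *s)
{
	return atomic_load_explicit(&s->vram_usage, memory_order_relaxed);
}

The same accounting is what the later kfd_svm.c hunks hook into when an svm_bo is allocated or released on behalf of a process.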
@@ -1509,6 +1510,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev, if (adev->gfx.config.gc_tcp_size_per_cu) { pcache_info[i].cache_size = adev->gfx.config.gc_tcp_size_per_cu; pcache_info[i].cache_level = 1; + /* Cacheline size not available in IP discovery for gc943,gc944 */ + pcache_info[i].cache_line_size = 128; pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE); @@ -1520,6 +1523,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev, pcache_info[i].cache_size = adev->gfx.config.gc_l1_instruction_cache_size_per_sqc; pcache_info[i].cache_level = 1; + pcache_info[i].cache_line_size = 64; pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE); @@ -1530,6 +1534,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev, if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) { pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc; pcache_info[i].cache_level = 1; + pcache_info[i].cache_line_size = 64; pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE); @@ -1540,6 +1545,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev, if (adev->gfx.config.gc_tcc_size) { pcache_info[i].cache_size = adev->gfx.config.gc_tcc_size; pcache_info[i].cache_level = 2; + pcache_info[i].cache_line_size = 128; pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE); @@ -1550,6 +1556,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev, if (adev->gmc.mall_size) { pcache_info[i].cache_size = adev->gmc.mall_size / 1024; pcache_info[i].cache_level = 3; + pcache_info[i].cache_line_size = 64; pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE); @@ -2329,6 +2336,8 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, continue; if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id) continue; + if (!amdgpu_xgmi_get_is_sharing_enabled(kdev->adev, peer_dev->gpu->adev)) + continue; sub_type_hdr = (typeof(sub_type_hdr))( (char *)sub_type_hdr + sizeof(struct crat_subtype_iolink)); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index fad1c8f2bc83..9b51dd75fefc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -235,6 +235,9 @@ static void kfd_device_info_init(struct kfd_dev *kfd, */ kfd->device_info.needs_pci_atomics = true; kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0; + } else if (gc_version < IP_VERSION(13, 0, 0)) { + kfd->device_info.needs_pci_atomics = true; + kfd->device_info.no_atomic_fw_version = 2090; } else { kfd->device_info.needs_pci_atomics = true; } @@ -534,7 +537,8 @@ static void kfd_cwsr_init(struct kfd_dev *kfd) kfd->cwsr_isa = cwsr_trap_gfx11_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex); } else { - BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex) > PAGE_SIZE); + BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex) + > KFD_CWSR_TMA_OFFSET); kfd->cwsr_isa = cwsr_trap_gfx12_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx12_hex); } @@ -1392,6 +1396,13 @@ void kfd_dec_compute_active(struct kfd_node *node) WARN_ONCE(count < 0, "Compute profile ref. 
count error"); } +static bool kfd_compute_active(struct kfd_node *node) +{ + if (atomic_read(&node->kfd->compute_profile)) + return true; + return false; +} + void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) { /* @@ -1485,6 +1496,24 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) return node->dqm->ops.halt(node->dqm); } +bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id) +{ + struct kfd_node *node; + + if (!kfd->init_complete) + return false; + + if (node_id >= kfd->num_nodes) { + dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n", + node_id, kfd->num_nodes - 1); + return false; + } + + node = kfd->nodes[node_id]; + + return kfd_compute_active(node); +} + #if defined(CONFIG_DEBUG_FS) /* This function will send a package to HIQ to hang the HWS diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 648f40091aa3..c79fe9069e22 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -202,6 +202,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, int r, queue_type; uint64_t wptr_addr_off; + if (!dqm->sched_running || dqm->sched_halt) + return 0; if (!down_read_trylock(&adev->reset_domain->sem)) return -EIO; @@ -270,6 +272,8 @@ static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q, int r; struct mes_remove_queue_input queue_input; + if (!dqm->sched_running || dqm->sched_halt) + return 0; if (!down_read_trylock(&adev->reset_domain->sem)) return -EIO; @@ -292,7 +296,7 @@ static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q, return r; } -static int remove_all_queues_mes(struct device_queue_manager *dqm) +static int remove_all_kfd_queues_mes(struct device_queue_manager *dqm) { struct device_process_node *cur; struct device *dev = dqm->dev->adev->dev; @@ -319,6 +323,33 @@ static int remove_all_queues_mes(struct device_queue_manager *dqm) return retval; } +static int add_all_kfd_queues_mes(struct device_queue_manager *dqm) +{ + struct device_process_node *cur; + struct device *dev = dqm->dev->adev->dev; + struct qcm_process_device *qpd; + struct queue *q; + int retval = 0; + + list_for_each_entry(cur, &dqm->queues, list) { + qpd = cur->qpd; + list_for_each_entry(q, &qpd->queues_list, list) { + if (!q->properties.is_active) + continue; + retval = add_queue_mes(dqm, q, qpd); + if (retval) { + dev_err(dev, "%s: Failed to add queue %d for dev %d", + __func__, + q->properties.queue_id, + dqm->dev->id); + return retval; + } + } + } + + return retval; +} + static int suspend_all_queues_mes(struct device_queue_manager *dqm) { struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev; @@ -1742,7 +1773,7 @@ static int halt_cpsch(struct device_queue_manager *dqm) KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); else - ret = remove_all_queues_mes(dqm); + ret = remove_all_kfd_queues_mes(dqm); } dqm->sched_halt = true; dqm_unlock(dqm); @@ -1768,6 +1799,9 @@ static int unhalt_cpsch(struct device_queue_manager *dqm) ret = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); + else + ret = add_all_kfd_queues_mes(dqm); + dqm_unlock(dqm); return ret; @@ -1867,7 +1901,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) if (!dqm->dev->kfd->shared_resources.enable_mes) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 
USE_DEFAULT_GRACE_PERIOD, false); else - remove_all_queues_mes(dqm); + remove_all_kfd_queues_mes(dqm); dqm->sched_running = false; @@ -2048,7 +2082,7 @@ int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm, { unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies; struct device *dev = dqm->dev->adev->dev; - uint64_t *fence_addr = dqm->fence_addr; + uint64_t *fence_addr = dqm->fence_addr; while (*fence_addr != fence_value) { /* Fatal err detected, this response won't come */ @@ -2254,6 +2288,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, goto out; *dqm->fence_addr = KFD_FENCE_INIT; + mb(); pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr, KFD_FENCE_COMPLETED); /* should be timed out */ @@ -3173,7 +3208,7 @@ struct copy_context_work_handler_workarea { struct kfd_process *p; }; -static void copy_context_work_handler (struct work_struct *work) +static void copy_context_work_handler(struct work_struct *work) { struct copy_context_work_handler_workarea *workarea; struct mqd_manager *mqd_mgr; @@ -3200,6 +3235,9 @@ static void copy_context_work_handler (struct work_struct *work) struct qcm_process_device *qpd = &pdd->qpd; list_for_each_entry(q, &qpd->queues_list, list) { + if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE) + continue; + mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; /* We ignore the return value from get_wave_state diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 4843dcb9a5f7..2b0a830f5b29 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -125,7 +125,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev, memset(kq->pq_kernel_addr, 0, queue_size); memset(kq->rptr_kernel, 0, sizeof(*kq->rptr_kernel)); - memset(kq->wptr_kernel, 0, sizeof(*kq->wptr_kernel)); + memset(kq->wptr_kernel, 0, dev->kfd->device_info.doorbell_size); prop.queue_size = queue_size; prop.is_interop = false; @@ -306,12 +306,17 @@ int kq_submit_packet(struct kernel_queue *kq) if (amdgpu_amdkfd_is_fed(kq->dev->adev)) return -EIO; + /* Make sure ring buffer is updated before wptr updated */ + mb(); + if (kq->dev->kfd->device_info.doorbell_size == 8) { *kq->wptr64_kernel = kq->pending_wptr64; + mb(); /* Make sure wptr updated before ring doorbell */ write_kernel_doorbell64(kq->queue->properties.doorbell_ptr, kq->pending_wptr64); } else { *kq->wptr_kernel = kq->pending_wptr; + mb(); /* Make sure wptr updated before ring doorbell */ write_kernel_doorbell(kq->queue->properties.doorbell_ptr, kq->pending_wptr); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 8ee3d07ffbdf..eacfeb32f35d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -445,14 +445,13 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", mpages, cpages, migrate.npages); - kfd_smi_event_migration_end(node, p->lead_thread->pid, - start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, node->id, trigger); - svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages); out_free: kvfree(buf); + kfd_smi_event_migration_end(node, p->lead_thread->pid, + start >> PAGE_SHIFT, end >> PAGE_SHIFT, + 0, node->id, trigger, r); out: if (!r && mpages) { pdd = svm_range_get_pdd_by_node(prange, node); @@ -751,14 +750,13 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, 
svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); - kfd_smi_event_migration_end(node, p->lead_thread->pid, - start >> PAGE_SHIFT, end >> PAGE_SHIFT, - node->id, 0, trigger); - svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages); out_free: kvfree(buf); + kfd_smi_event_migration_end(node, p->lead_thread->pid, + start >> PAGE_SHIFT, end >> PAGE_SHIFT, + node->id, 0, trigger, r); out: if (!r && cpages) { mpages = cpages - upages; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index d6530febabad..9e5ca0b93b2a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -775,7 +775,7 @@ struct kfd_process_device { enum kfd_pdd_bound bound; /* VRAM usage */ - uint64_t vram_usage; + atomic64_t vram_usage; struct attribute attr_vram; char vram_filename[MAX_SYSFS_FILENAME_LEN]; @@ -1347,7 +1347,6 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p); void pqm_uninit(struct process_queue_manager *pqm); int pqm_create_queue(struct process_queue_manager *pqm, struct kfd_node *dev, - struct file *f, struct queue_properties *properties, unsigned int *qid, const struct kfd_criu_queue_priv_data *q_data, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index d07acf1b2f93..87cd52cf4ee9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -271,11 +271,9 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) struct kfd_process *proc = NULL; struct kfd_process_device *pdd = NULL; int i; - struct kfd_cu_occupancy cu_occupancy[AMDGPU_MAX_QUEUES]; + struct kfd_cu_occupancy *cu_occupancy; u32 queue_format; - memset(cu_occupancy, 0x0, sizeof(cu_occupancy)); - pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy); dev = pdd->dev; if (dev->kfd2kgd->get_cu_occupancy == NULL) @@ -293,6 +291,10 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) wave_cnt = 0; max_waves_per_cu = 0; + cu_occupancy = kcalloc(AMDGPU_MAX_QUEUES, sizeof(*cu_occupancy), GFP_KERNEL); + if (!cu_occupancy) + return -ENOMEM; + /* * For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition. 
* For AQL queues, because of cooperative dispatch we multiply the wave count @@ -318,6 +320,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) /* Translate wave count to number of compute units */ cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu; + kfree(cu_occupancy); return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt); } @@ -332,14 +335,14 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr, } else if (strncmp(attr->name, "vram_", 5) == 0) { struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device, attr_vram); - return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage)); + return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage)); } else if (strncmp(attr->name, "sdma_", 5) == 0) { struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device, attr_sdma); struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler; - INIT_WORK(&sdma_activity_work_handler.sdma_activity_work, - kfd_sdma_activity_worker); + INIT_WORK_ONSTACK(&sdma_activity_work_handler.sdma_activity_work, + kfd_sdma_activity_worker); sdma_activity_work_handler.pdd = pdd; sdma_activity_work_handler.sdma_activity_counter = 0; @@ -347,6 +350,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr, schedule_work(&sdma_activity_work_handler.sdma_activity_work); flush_work(&sdma_activity_work_handler.sdma_activity_work); + destroy_work_on_stack(&sdma_activity_work_handler.sdma_activity_work); return snprintf(buffer, PAGE_SIZE, "%llu\n", (sdma_activity_work_handler.sdma_activity_counter)/ @@ -850,8 +854,10 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) goto out; } - /* A prior open of /dev/kfd could have already created the process. */ - process = find_process(thread, false); + /* A prior open of /dev/kfd could have already created the process. + * find_process will increase process kref in this case + */ + process = find_process(thread, true); if (process) { pr_debug("Process already found\n"); } else { @@ -899,8 +905,6 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) init_waitqueue_head(&process->wait_irq_drain); } out: - if (!IS_ERR(process)) - kref_get(&process->ref); mutex_unlock(&kfd_processes_mutex); mmput(thread->mm); @@ -1186,10 +1190,8 @@ static void kfd_process_ref_release(struct kref *ref) static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm) { - int idx = srcu_read_lock(&kfd_processes_srcu); - struct kfd_process *p = find_process_by_mm(mm); - - srcu_read_unlock(&kfd_processes_srcu, idx); + /* This increments p->ref counter if kfd process p exists */ + struct kfd_process *p = kfd_lookup_process_by_mm(mm); return p ? &p->mmu_notifier : ERR_PTR(-ESRCH); } @@ -1625,7 +1627,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, pdd->bound = PDD_UNBOUND; pdd->already_dequeued = false; pdd->runtime_inuse = false; - pdd->vram_usage = 0; + atomic64_set(&pdd->vram_usage, 0); pdd->sdma_past_activity_counter = 0; pdd->user_gpu_id = dev->id; atomic64_set(&pdd->evict_duration_counter, 0); @@ -1702,12 +1704,15 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm, &p->kgd_process_info, - &ef); + p->ef ? 
NULL : &ef); if (ret) { dev_err(dev->adev->dev, "Failed to create process VM object\n"); return ret; } - RCU_INIT_POINTER(p->ef, ef); + + if (!p->ef) + RCU_INIT_POINTER(p->ef, ef); + pdd->drm_priv = drm_file->private_data; ret = kfd_process_device_reserve_ib_mem(pdd); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 01b960b15274..c76db22a1000 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -235,7 +235,7 @@ void pqm_uninit(struct process_queue_manager *pqm) static int init_user_queue(struct process_queue_manager *pqm, struct kfd_node *dev, struct queue **q, struct queue_properties *q_properties, - struct file *f, unsigned int qid) + unsigned int qid) { int retval; @@ -300,7 +300,6 @@ cleanup: int pqm_create_queue(struct process_queue_manager *pqm, struct kfd_node *dev, - struct file *f, struct queue_properties *properties, unsigned int *qid, const struct kfd_criu_queue_priv_data *q_data, @@ -374,7 +373,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, * allocate_sdma_queue() in create_queue() has the * corresponding check logic. */ - retval = init_user_queue(pqm, dev, &q, properties, f, *qid); + retval = init_user_queue(pqm, dev, &q, properties, *qid); if (retval != 0) goto err_create_queue; pqn->q = q; @@ -395,7 +394,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, goto err_create_queue; } - retval = init_user_queue(pqm, dev, &q, properties, f, *qid); + retval = init_user_queue(pqm, dev, &q, properties, *qid); if (retval != 0) goto err_create_queue; pqn->q = q; @@ -1029,8 +1028,7 @@ int kfd_criu_restore_queue(struct kfd_process *p, print_queue_properties(&qp); - ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, q_data, mqd, ctl_stack, - NULL); + ret = pqm_create_queue(&p->pqm, pdd->dev, &qp, &queue_id, q_data, mqd, ctl_stack, NULL); if (ret) { pr_err("Failed to create new queue err:%d\n", ret); goto exit; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index de8b9abf7afc..9b8169761ec5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -44,7 +44,7 @@ struct kfd_smi_client { bool suser; }; -#define MAX_KFIFO_SIZE 1024 +#define KFD_MAX_KFIFO_SIZE 8192 static __poll_t kfd_smi_ev_poll(struct file *, struct poll_table_struct *); static ssize_t kfd_smi_ev_read(struct file *, char __user *, size_t, loff_t *); @@ -86,7 +86,7 @@ static ssize_t kfd_smi_ev_read(struct file *filep, char __user *user, struct kfd_smi_client *client = filep->private_data; unsigned char *buf; - size = min_t(size_t, size, MAX_KFIFO_SIZE); + size = min_t(size_t, size, KFD_MAX_KFIFO_SIZE); buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -292,12 +292,13 @@ void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid, void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid, unsigned long start, unsigned long end, - uint32_t from, uint32_t to, uint32_t trigger) + uint32_t from, uint32_t to, uint32_t trigger, + int error_code) { kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END, KFD_EVENT_FMT_MIGRATE_END( ktime_get_boottime_ns(), pid, start, end - start, - from, to, trigger)); + from, to, trigger, error_code)); } void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid, @@ -354,7 +355,7 @@ int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd) return -ENOMEM; 
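	/*
	 * Sizing note (illustrative commentary, not part of the original
	 * patch): SMI events are short formatted text records, so the old
	 * 1 KiB fifo held only a handful of them before writers had to
	 * start dropping events; 8 KiB gives readers more headroom during
	 * bursts such as the per-range migrate begin/end pairs emitted
	 * above. kfifo_alloc() rounds the size up to a power of two, and
	 * 8192 already is one.
	 */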
INIT_LIST_HEAD(&client->list); - ret = kfifo_alloc(&client->fifo, MAX_KFIFO_SIZE, GFP_KERNEL); + ret = kfifo_alloc(&client->fifo, KFD_MAX_KFIFO_SIZE, GFP_KERNEL); if (ret) { kfree(client); return ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h index 85010b8307f8..503bff13d815 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h @@ -44,7 +44,8 @@ void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid, uint32_t trigger); void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid, unsigned long start, unsigned long end, - uint32_t from, uint32_t to, uint32_t trigger); + uint32_t from, uint32_t to, uint32_t trigger, + int error_code); void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid, uint32_t trigger); void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 04e746923697..3e2911895c74 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -405,6 +405,27 @@ static void svm_range_bo_release(struct kref *kref) spin_lock(&svm_bo->list_lock); } spin_unlock(&svm_bo->list_lock); + + if (mmget_not_zero(svm_bo->eviction_fence->mm)) { + struct kfd_process_device *pdd; + struct kfd_process *p; + struct mm_struct *mm; + + mm = svm_bo->eviction_fence->mm; + /* + * The forked child process takes svm_bo device pages ref, svm_bo could be + * released after parent process is gone. + */ + p = kfd_lookup_process_by_mm(mm); + if (p) { + pdd = kfd_get_process_device_data(svm_bo->node, p); + if (pdd) + atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage); + kfd_unref_process(p); + } + mmput(mm); + } + if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) /* We're not in the eviction worker. Signal the fence. 
*/ dma_fence_signal(&svm_bo->eviction_fence->base); @@ -532,6 +553,7 @@ int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bool clear) { + struct kfd_process_device *pdd; struct amdgpu_bo_param bp; struct svm_range_bo *svm_bo; struct amdgpu_bo_user *ubo; @@ -623,6 +645,10 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, list_add(&prange->svm_bo_list, &svm_bo->range_list); spin_unlock(&svm_bo->list_lock); + pdd = svm_range_get_pdd_by_node(prange, node); + if (pdd) + atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage); + return 0; reserve_bo_failed: @@ -3085,8 +3111,6 @@ retry_write_locked: start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start); last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last); if (prange->actual_loc != 0 || best_loc != 0) { - migration = true; - if (best_loc) { r = svm_migrate_to_vram(prange, best_loc, start, last, mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); @@ -3109,7 +3133,9 @@ retry_write_locked: if (r) { pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n", r, svms, start, last); - goto out_unlock_range; + goto out_migrate_fail; + } else { + migration = true; } } @@ -3119,6 +3145,7 @@ retry_write_locked: pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n", r, svms, start, last); +out_migrate_fail: kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr, migration); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 3871591c9aec..9476e30d6baa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1998,6 +1998,8 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev) if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 4, 2)) dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED; + + dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED; } else { dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 | HSA_DBG_WATCH_ADDR_MASK_HI_BIT; diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index df17e79c45c7..11e3f2f3b174 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -7,20 +7,21 @@ menu "Display Engine Configuration" config DRM_AMD_DC bool "AMD DC - Enable new display engine" default y - depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64 + depends on BROKEN || !CC_IS_CLANG || ARM64 || LOONGARCH || RISCV || SPARC64 || X86_64 select SND_HDA_COMPONENT if SND_HDA_CORE # !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752 - select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || RISCV)) + select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || LOONGARCH || RISCV)) help Choose this option if you want to use the new display engine support for AMDGPU. This adds required support for Vega and Raven ASICs. - calculate_bandwidth() is presently broken on all !(X86_64 || SPARC64 || ARM64) - architectures built with Clang (all released versions), whereby the stack - frame gets blown up to well over 5k. This would cause an immediate kernel - panic on most architectures. We'll revert this when the following bug report - has been resolved: https://github.com/llvm/llvm-project/issues/41896. 
+ calculate_bandwidth() is presently broken on all !(X86_64 || SPARC64 || + ARM64 || LOONGARCH || RISCV) architectures built with Clang (all released + versions), whereby the stack frame gets blown up to well over 5k. This + would cause an immediate kernel panic on most architectures. We'll revert + this when the following bug report has been resolved: + https://github.com/llvm/llvm-project/issues/41896. config DRM_AMD_DC_FP def_bool n diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 60c617fcc97e..48be917e7bc5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -320,18 +320,18 @@ static bool dm_is_idle(void *handle) return true; } -static int dm_wait_for_idle(void *handle) +static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* XXX todo */ return 0; } -static bool dm_check_soft_reset(void *handle) +static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block) { return false; } -static int dm_soft_reset(void *handle) +static int dm_soft_reset(struct amdgpu_ip_block *ip_block) { /* XXX todo */ return 0; @@ -968,7 +968,7 @@ static int dm_set_powergating_state(void *handle, } /* Prototypes of private functions */ -static int dm_early_init(void *handle); +static int dm_early_init(struct amdgpu_ip_block *ip_block); /* Allocate memory for FBC compressed data */ static void amdgpu_dm_fbc_init(struct drm_connector *connector) @@ -1307,6 +1307,29 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) DRM_INFO("DMUB hardware initialized: version=0x%08X\n", adev->dm.dmcub_fw_version); + /* Keeping sanity checks off if + * DCN31 >= 4.0.59.0 + * DCN314 >= 8.0.16.0 + * Otherwise, turn on sanity checks + */ + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + if (adev->dm.dmcub_fw_version && + adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) && + adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59)) + adev->dm.dc->debug.sanity_checks = true; + break; + case IP_VERSION(3, 1, 4): + if (adev->dm.dmcub_fw_version && + adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) && + adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16)) + adev->dm.dc->debug.sanity_checks = true; + break; + default: + break; + } + return 0; } @@ -1696,6 +1719,26 @@ dm_allocate_gpu_mem( return da->cpu_ptr; } +void +dm_free_gpu_mem( + struct amdgpu_device *adev, + enum dc_gpu_mem_alloc_type type, + void *pvMem) +{ + struct dal_allocation *da; + + /* walk the da list in DM */ + list_for_each_entry(da, &adev->dm.da_list, list) { + if (pvMem == da->cpu_ptr) { + amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); + list_del(&da->list); + kfree(da); + break; + } + } + +} + static enum dmub_status dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev, enum dmub_gpint_command command_code, @@ -1762,16 +1805,20 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * /* Send the chunk */ ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000); if (ret != DMUB_STATUS_OK) - /* No need to free bb here since it shall be done in dm_sw_fini() */ - return NULL; + goto free_bb; } /* Now ask DMUB to copy the bb */ ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000); if (ret != DMUB_STATUS_OK) - return NULL; + goto free_bb; return bb; + +free_bb: + dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb); + return NULL; + } static enum 
dmub_ips_disable_type dm_get_default_ips_mode( @@ -1886,7 +1933,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) else init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0); } else { - init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); + if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3)) + init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1); + else + init_data.flags.gpu_vm_support = + (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); } adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support; @@ -2115,9 +2166,9 @@ error: return -EINVAL; } -static int amdgpu_dm_early_fini(void *handle) +static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_dm_audio_fini(adev); @@ -2509,9 +2560,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) return 0; } -static int dm_sw_init(void *handle) +static int dm_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->dm.cgs_device = amdgpu_cgs_create_device(adev); @@ -2531,9 +2582,9 @@ static int dm_sw_init(void *handle) return load_dmcu_fw(adev); } -static int dm_sw_fini(void *handle) +static int dm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct dal_allocation *da; list_for_each_entry(da, &adev->dm.da_list, list) { @@ -2541,11 +2592,11 @@ static int dm_sw_fini(void *handle) amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); list_del(&da->list); kfree(da); + adev->dm.bb_from_dmub = NULL; break; } } - adev->dm.bb_from_dmub = NULL; kfree(adev->dm.dmub_fb_info); adev->dm.dmub_fb_info = NULL; @@ -2598,9 +2649,9 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) return ret; } -static int dm_late_init(void *handle) +static int dm_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct dmcu_iram_parameters params; unsigned int linear_lut[16]; @@ -2790,7 +2841,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) /** * dm_hw_init() - Initialize DC device - * @handle: The base driver device containing the amdgpu_dm device. + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the &struct amdgpu_display_manager device. This involves calling * the initializers of each DM component, then populating the struct with them. @@ -2808,9 +2859,9 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) * - Vblank support * - Debug FS entries, if enabled */ -static int dm_hw_init(void *handle) +static int dm_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; /* Create DAL display manager */ @@ -2824,15 +2875,15 @@ static int dm_hw_init(void *handle) /** * dm_hw_fini() - Teardown DC device - * @handle: The base driver device containing the amdgpu_dm device. + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Teardown components within &struct amdgpu_display_manager that require * cleanup. This involves cleaning up the DRM device, DC, and any modules that * were loaded. 
Also flush IRQ workqueues and disable them. */ -static int dm_hw_fini(void *handle) +static int dm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_dm_hpd_fini(adev); @@ -2936,9 +2987,9 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) } } -static int dm_suspend(void *handle) +static int dm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_display_manager *dm = &adev->dm; int ret = 0; @@ -2972,10 +3023,11 @@ static int dm_suspend(void *handle) hpd_rx_irq_work_suspend(dm); - if (adev->dm.dc->caps.ips_support) - dc_allow_idle_optimizations(adev->dm.dc, true); - dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); + + if (dm->dc->caps.ips_support && adev->in_s0ix) + dc_allow_idle_optimizations(dm->dc, true); + dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); return 0; @@ -3124,9 +3176,9 @@ cleanup: kfree(bundle); } -static int dm_resume(void *handle) +static int dm_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct drm_device *ddev = adev_to_drm(adev); struct amdgpu_display_manager *dm = &adev->dm; struct amdgpu_dm_connector *aconnector; @@ -3141,8 +3193,7 @@ static int dm_resume(void *handle) struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; struct dc_state *dc_state; - int i, r, j, ret; - bool need_hotplug = false; + int i, r, j; struct dc_commit_streams_params commit_params = {}; if (dm->dc->caps.ips_support) { @@ -3331,23 +3382,16 @@ static int dm_resume(void *handle) aconnector->mst_root) continue; - ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true); - - if (ret < 0) { - dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, - aconnector->dc_link); - need_hotplug = true; - } + drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); } drm_connector_list_iter_end(&iter); - if (need_hotplug) - drm_kms_helper_hotplug_event(ddev); - amdgpu_dm_irq_resume_late(adev); amdgpu_dm_smu_write_watermarks_table(adev); + drm_kms_helper_hotplug_event(ddev); + return 0; } @@ -3378,8 +3422,6 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = { .soft_reset = dm_soft_reset, .set_clockgating_state = dm_set_clockgating_state, .set_powergating_state = dm_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version dm_ip_block = { @@ -3439,6 +3481,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps->aux_support = false; else if (amdgpu_backlight == 1) caps->aux_support = true; + if (caps->aux_support) + aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX; luminance_range = &conn_base->display_info.luminance_range; @@ -3494,7 +3538,7 @@ void amdgpu_dm_update_connector_after_detect( aconnector->dc_sink = sink; dc_sink_retain(aconnector->dc_sink); amdgpu_dm_update_freesync_caps(connector, - aconnector->edid); + aconnector->drm_edid); } else { amdgpu_dm_update_freesync_caps(connector, NULL); if (!aconnector->dc_sink) { @@ -3553,18 +3597,19 @@ void amdgpu_dm_update_connector_after_detect( aconnector->dc_sink = sink; dc_sink_retain(aconnector->dc_sink); if (sink->dc_edid.length == 0) { - aconnector->edid = NULL; + aconnector->drm_edid = NULL; if 
(aconnector->dc_link->aux_mode) { - drm_dp_cec_unset_edid( - &aconnector->dm_dp_aux.aux); + drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); } } else { - aconnector->edid = - (struct edid *)sink->dc_edid.raw_edid; + const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid; + + aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length); + drm_edid_connector_update(connector, aconnector->drm_edid); if (aconnector->dc_link->aux_mode) - drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, - aconnector->edid); + drm_dp_cec_attach(&aconnector->dm_dp_aux.aux, + connector->display_info.source_physical_address); } if (!aconnector->timing_requested) { @@ -3575,17 +3620,16 @@ void amdgpu_dm_update_connector_after_detect( "failed to create aconnector->requested_timing\n"); } - drm_connector_update_edid_property(connector, aconnector->edid); - amdgpu_dm_update_freesync_caps(connector, aconnector->edid); + amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid); update_connector_ext_caps(aconnector); } else { drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); amdgpu_dm_update_freesync_caps(connector, NULL); - drm_connector_update_edid_property(connector, NULL); aconnector->num_modes = 0; dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + drm_edid_free(aconnector->drm_edid); + aconnector->drm_edid = NULL; kfree(aconnector->timing_requested); aconnector->timing_requested = NULL; /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ @@ -4621,7 +4665,12 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, if (!rc) DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); } else { - rc = dc_link_set_backlight_level(link, brightness, 0); + struct set_backlight_level_params backlight_level_params = { 0 }; + + backlight_level_params.backlight_pwm_u16_16 = brightness; + backlight_level_params.transition_time_in_ms = 0; + + rc = dc_link_set_backlight_level(link, &backlight_level_params); if (!rc) DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); } @@ -5176,15 +5225,20 @@ static ssize_t s3_debug_store(struct device *device, int s3_state; struct drm_device *drm_dev = dev_get_drvdata(device); struct amdgpu_device *adev = drm_to_adev(drm_dev); + struct amdgpu_ip_block *ip_block; + + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE); + if (!ip_block) + return -EINVAL; ret = kstrtoint(buf, 0, &s3_state); if (ret == 0) { if (s3_state) { - dm_resume(adev); + dm_resume(ip_block); drm_kms_helper_hotplug_event(adev_to_drm(adev)); } else - dm_suspend(adev); + dm_suspend(ip_block); } return ret == 0 ? 
count : 0; @@ -5256,9 +5310,9 @@ static int dm_init_microcode(struct amdgpu_device *adev) return r; } -static int dm_early_init(void *handle) +static int dm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_mode_info *mode_info = &adev->mode_info; struct atom_context *ctx = mode_info->atom_context; int index = GetIndexIntoMasterTable(DATA, Object_Header); @@ -6761,7 +6815,7 @@ create_stream_for_sink(struct drm_connector *connector, if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); - aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; + aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; } finish: @@ -7121,32 +7175,24 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_em_sink = aconnector->dc_em_sink; - struct edid *edid; - struct i2c_adapter *ddc; - - if (dc_link && dc_link->aux_mode) - ddc = &aconnector->dm_dp_aux.aux.ddc; - else - ddc = &aconnector->i2c->base; + const struct drm_edid *drm_edid; - /* - * Note: drm_get_edid gets edid in the following order: - * 1) override EDID if set via edid_override debugfs, - * 2) firmware EDID if set via edid_firmware module parameter - * 3) regular DDC read. - */ - edid = drm_get_edid(connector, ddc); - if (!edid) { + drm_edid = drm_edid_read(connector); + drm_edid_connector_update(connector, drm_edid); + if (!drm_edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; } - aconnector->edid = edid; - + aconnector->drm_edid = drm_edid; /* Update emulated (virtual) sink's EDID */ if (dc_em_sink && dc_link) { + // FIXME: Get rid of drm_edid_raw() + const struct edid *edid = drm_edid_raw(drm_edid); + memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps)); - memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH); + memmove(dc_em_sink->dc_edid.raw_edid, edid, + (edid->extensions + 1) * EDID_LENGTH); dm_helpers_parse_edid_caps( dc_link, &dc_em_sink->dc_edid, @@ -7176,36 +7222,26 @@ static int get_modes(struct drm_connector *connector) static void create_eml_sink(struct amdgpu_dm_connector *aconnector) { struct drm_connector *connector = &aconnector->base; - struct dc_link *dc_link = aconnector->dc_link; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_VIRTUAL }; - struct edid *edid; - struct i2c_adapter *ddc; - - if (dc_link->aux_mode) - ddc = &aconnector->dm_dp_aux.aux.ddc; - else - ddc = &aconnector->i2c->base; + const struct drm_edid *drm_edid; + const struct edid *edid; - /* - * Note: drm_get_edid gets edid in the following order: - * 1) override EDID if set via edid_override debugfs, - * 2) firmware EDID if set via edid_firmware module parameter - * 3) regular DDC read. 
- */ - edid = drm_get_edid(connector, ddc); - if (!edid) { + drm_edid = drm_edid_read(connector); + drm_edid_connector_update(connector, drm_edid); + if (!drm_edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; } - if (drm_detect_hdmi_monitor(edid)) + if (connector->display_info.is_hdmi) init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; - aconnector->edid = edid; + aconnector->drm_edid = drm_edid; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() aconnector->dc_em_sink = dc_link_add_remote_sink( aconnector->dc_link, (uint8_t *)edid, @@ -7312,10 +7348,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8; enum dc_status dc_result = DC_OK; + uint8_t bpc_limit = 6; if (!dm_state) return NULL; + if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A || + aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) + bpc_limit = 8; + do { stream = create_stream_for_sink(connector, drm_mode, dm_state, old_stream, @@ -7336,11 +7377,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, dc_result = dm_validate_stream_and_context(adev->dm.dc, stream); if (dc_result != DC_OK) { - DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n", + DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n", drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->clock, - dc_result, + dc_pixel_encoding_to_str(stream->timing.pixel_encoding), + dc_color_depth_to_str(stream->timing.display_color_depth), dc_status_to_str(dc_result)); dc_stream_release(stream); @@ -7348,10 +7390,13 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, requested_bpc -= 2; /* lower bpc to retry validation */ } - } while (stream == NULL && requested_bpc >= 6); + } while (stream == NULL && requested_bpc >= bpc_limit); - if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) { - DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n"); + if ((dc_result == DC_FAIL_ENC_VALIDATE || + dc_result == DC_EXCEED_DONGLE_CAP) && + !aconnector->force_yuv420_output) { + DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n", + __func__, __LINE__); aconnector->force_yuv420_output = true; stream = create_validate_stream_for_sink(aconnector, drm_mode, @@ -7892,16 +7937,16 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector) } static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (edid) { + if (drm_edid) { /* empty probed_modes */ INIT_LIST_HEAD(&connector->probed_modes); amdgpu_dm_connector->num_modes = - drm_add_edid_modes(connector, edid); + drm_edid_connector_add_modes(connector); /* sorting the probed modes before calling function * amdgpu_dm_get_native_mode() since EDID can have @@ -7915,10 +7960,10 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, amdgpu_dm_get_native_mode(connector); /* Freesync capabilities are reset by calling - * drm_add_edid_modes() and need to be + * drm_edid_connector_add_modes() and need to be * restored here. 
*/ - amdgpu_dm_update_freesync_caps(connector, edid); + amdgpu_dm_update_freesync_caps(connector, drm_edid); } else { amdgpu_dm_connector->num_modes = 0; } @@ -8014,12 +8059,12 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) } static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (!(amdgpu_freesync_vid_mode && edid)) + if (!(amdgpu_freesync_vid_mode && drm_edid)) return; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -8032,24 +8077,24 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct drm_encoder *encoder; - struct edid *edid = amdgpu_dm_connector->edid; + const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid; struct dc_link_settings *verified_link_cap = &amdgpu_dm_connector->dc_link->verified_link_cap; const struct dc *dc = amdgpu_dm_connector->dc_link->dc; encoder = amdgpu_dm_connector_to_encoder(connector); - if (!drm_edid_is_valid(edid)) { + if (!drm_edid) { amdgpu_dm_connector->num_modes = drm_add_modes_noedid(connector, 640, 480); if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) amdgpu_dm_connector->num_modes += drm_add_modes_noedid(connector, 1920, 1080); } else { - amdgpu_dm_connector_ddc_get_modes(connector, edid); + amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); if (encoder) amdgpu_dm_connector_add_common_modes(encoder, connector); - amdgpu_dm_connector_add_freesync_modes(connector, edid); + amdgpu_dm_connector_add_freesync_modes(connector, drm_edid); } amdgpu_dm_fbc_init(connector); @@ -8373,7 +8418,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev, if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0) || acrtc_state->stream->link->psr_settings.psr_version < - DC_PSR_VERSION_UNSUPPORTED) { + DC_PSR_VERSION_UNSUPPORTED || + !(adev->flags & AMD_IS_APU)) { timing = &acrtc_state->stream->timing; /* at least 2 frames */ @@ -8873,6 +8919,56 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane, } } +static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach, + const struct dm_crtc_state *acrtc_state, + const u64 current_ts) +{ + struct psr_settings *psr = &acrtc_state->stream->link->psr_settings; + struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; + struct amdgpu_dm_connector *aconn = + (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; + + if (acrtc_state->update_type > UPDATE_TYPE_FAST) { + if (pr->config.replay_supported && !pr->replay_feature_enabled) + amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn); + else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED && + !psr->psr_feature_enabled) + if (!aconn->disallow_edp_enter_psr) + amdgpu_dm_link_setup_psr(acrtc_state->stream); + } + + /* Decrement skip count when SR is enabled and we're doing fast updates. */ + if (acrtc_state->update_type == UPDATE_TYPE_FAST && + (psr->psr_feature_enabled || pr->config.replay_supported)) { + if (aconn->sr_skip_count > 0) + aconn->sr_skip_count--; + + /* Allow SR when skip count is 0. */ + acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count; + + /* + * If sink supports PSR SU/Panel Replay, there is no need to rely on + * a vblank event disable request to enable PSR/RP. 
PSR SU/RP + * can be enabled immediately once OS demonstrates an + * adequate number of fast atomic commits to notify KMD + * of update events. See `vblank_control_worker()`. + */ + if (acrtc_attach->dm_irq_params.allow_sr_entry && +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && +#endif + (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) { + if (pr->replay_feature_enabled && !pr->replay_allow_active) + amdgpu_dm_replay_enable(acrtc_state->stream, true); + if (psr->psr_version >= DC_PSR_VERSION_SU_1 && + !psr->psr_allow_active && !aconn->disallow_edp_enter_psr) + amdgpu_dm_psr_enable(acrtc_state->stream); + } + } else { + acrtc_attach->dm_irq_params.allow_sr_entry = false; + } +} + static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct drm_device *dev, struct amdgpu_display_manager *dm, @@ -9026,7 +9122,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * during the PSR-SU was disabled. */ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && - acrtc_attach->dm_irq_params.allow_psr_entry && + acrtc_attach->dm_irq_params.allow_sr_entry && #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && #endif @@ -9201,9 +9297,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->stream_update.abm_level = &acrtc_state->abm_level; mutex_lock(&dm->dc_lock); - if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && - acrtc_state->stream->link->psr_settings.psr_allow_active) - amdgpu_dm_psr_disable(acrtc_state->stream); + if (acrtc_state->update_type > UPDATE_TYPE_FAST) { + if (acrtc_state->stream->link->replay_settings.replay_allow_active) + amdgpu_dm_replay_disable(acrtc_state->stream); + if (acrtc_state->stream->link->psr_settings.psr_allow_active) + amdgpu_dm_psr_disable(acrtc_state->stream); + } mutex_unlock(&dm->dc_lock); /* @@ -9244,57 +9343,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, dm_update_pflip_irq_state(drm_to_adev(dev), acrtc_attach); - if (acrtc_state->update_type > UPDATE_TYPE_FAST) { - if (acrtc_state->stream->link->replay_settings.config.replay_supported && - !acrtc_state->stream->link->replay_settings.replay_feature_enabled) { - struct amdgpu_dm_connector *aconn = - (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; - amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn); - } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && - !acrtc_state->stream->link->psr_settings.psr_feature_enabled) { - - struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *) - acrtc_state->stream->dm_stream_context; - - if (!aconn->disallow_edp_enter_psr) - amdgpu_dm_link_setup_psr(acrtc_state->stream); - } - } - - /* Decrement skip count when PSR is enabled and we're doing fast updates. */ - if (acrtc_state->update_type == UPDATE_TYPE_FAST && - acrtc_state->stream->link->psr_settings.psr_feature_enabled) { - struct amdgpu_dm_connector *aconn = - (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; - - if (aconn->psr_skip_count > 0) - aconn->psr_skip_count--; - - /* Allow PSR when skip count is 0. */ - acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; - - /* - * If sink supports PSR SU, there is no need to rely on - * a vblank event disable request to enable PSR. 
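The bare 500000000 literal used by this helper (and by the open-coded block it replaces below) is an age check in nanoseconds: self-refresh is only engaged once the last PSR dirty-rect change is more than 500 ms old. Making the unit explicit (macro name is illustrative; NSEC_PER_MSEC as provided via linux/time64.h):

    #define SR_DIRTY_RECTS_SETTLE_NS (500 * NSEC_PER_MSEC) /* == 500000000 */

    /* Illustrative: true once dirty rects have been stable for 500 ms. */
    static bool dirty_rects_settled(u64 now_ns, u64 last_change_ns)
    {
            return (now_ns - last_change_ns) > SR_DIRTY_RECTS_SETTLE_NS;
    }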
PSR SU - * can be enabled immediately once OS demonstrates an - * adequate number of fast atomic commits to notify KMD - * of update events. See `vblank_control_worker()`. - */ - if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && - acrtc_attach->dm_irq_params.allow_psr_entry && -#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY - !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && -#endif - !acrtc_state->stream->link->psr_settings.psr_allow_active && - !aconn->disallow_edp_enter_psr && - (timestamp_ns - - acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) > - 500000000) - amdgpu_dm_psr_enable(acrtc_state->stream); - } else { - acrtc_attach->dm_irq_params.allow_psr_entry = false; - } - + amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns); mutex_unlock(&dm->dc_lock); } @@ -9427,6 +9476,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, bool mode_set_reset_required = false; u32 i; struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; + bool set_backlight_level = false; /* Disable writeback */ for_each_old_connector_in_state(state, connector, old_con_state, i) { @@ -9546,6 +9596,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; mode_set_reset_required = true; + set_backlight_level = true; } else if (modereset_required(new_crtc_state)) { drm_dbg_atomic(dev, "Atomic commit: RESET. crtc id %d:[%p]\n", @@ -9573,7 +9624,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, WARN_ON(!dc_commit_streams(dm->dc, ¶ms)); /* Allow idle optimization when vblank count is 0 for display off */ - if (dm->active_vblank_irq_count == 0) + if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev)) dc_allow_idle_optimizations(dm->dc, true); mutex_unlock(&dm->dc_lock); @@ -9597,6 +9648,19 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->otg_inst = status->primary_otg_inst; } } + + /* During boot up and resume the DC layer will reset the panel brightness + * to fix a flicker issue. + * It will cause the dm->actual_brightness is not the current panel brightness + * level. 
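The idle-optimization change above tightens the display-off heuristic: a zero vblank IRQ count is no longer sufficient on its own, the device must also report as headless. As a predicate (a sketch, not the driver's wording):

    /* Illustrative: both conditions must now hold before allowing idle
     * optimizations for the display-off case. */
    static bool may_allow_idle_optimizations(unsigned int active_vblank_irqs,
                                             bool headless)
    {
            return active_vblank_irqs == 0 && headless;
    }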
(the dm->brightness is the correct panel level) + * So we set the backlight level with dm->brightness value after set mode + */ + if (set_backlight_level) { + for (i = 0; i < dm->num_of_edps; i++) { + if (dm->backlight_dev[i]) + amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); + } + } } static void dm_set_writeback(struct amdgpu_display_manager *dm, @@ -10104,6 +10168,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) for (i = 0; i < crtc_disable_count; i++) pm_runtime_put_autosuspend(dev->dev); pm_runtime_mark_last_busy(dev->dev); + + trace_amdgpu_dm_atomic_commit_tail_finish(state); } static int dm_force_atomic_commit(struct drm_connector *connector) @@ -12004,7 +12070,7 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, } static void parse_edid_displayid_vrr(struct drm_connector *connector, - struct edid *edid) + const struct edid *edid) { u8 *edid_ext = NULL; int i; @@ -12047,7 +12113,7 @@ static void parse_edid_displayid_vrr(struct drm_connector *connector, } static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, - struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) + const struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) { u8 *edid_ext = NULL; int i; @@ -12063,7 +12129,7 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, break; } - while (j < EDID_LENGTH) { + while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) { struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j]; unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]); @@ -12082,7 +12148,8 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, } static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, - struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) + const struct edid *edid, + struct amdgpu_hdmi_vsdb_info *vsdb_info) { u8 *edid_ext = NULL; int i; @@ -12116,7 +12183,7 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, * amdgpu_dm_update_freesync_caps - Update Freesync capabilities * * @connector: Connector to query. - * @edid: EDID from monitor + * @drm_edid: DRM EDID from monitor * * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep * track of some of the display information in the internal data struct used by @@ -12124,19 +12191,16 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, * FreeSync parameters. 
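The loop-bound change in parse_amd_vsdb() above is an out-of-bounds fix: casting &edid_ext[j] to struct amd_vsdb_block is only safe while a whole block still fits inside the 128-byte extension. The pattern in isolation (struct abbreviated to its leading fields; the OUI value shown is illustrative, not taken from this patch):

    #define EDID_LENGTH 128

    /* Abbreviated: the real struct carries more VSDB payload. */
    struct amd_vsdb_block { unsigned char ieee_id[3]; unsigned char version; };

    static const struct amd_vsdb_block *
    find_amd_vsdb(const unsigned char *edid_ext)
    {
            unsigned int j = 0;

            /* stop while a whole block still fits, so the cast cannot
             * read past the end of the 128-byte extension */
            while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) {
                    const struct amd_vsdb_block *b =
                            (const struct amd_vsdb_block *)&edid_ext[j];

                    if (b->ieee_id[0] == 0x00 && b->ieee_id[1] == 0x00 &&
                        b->ieee_id[2] == 0x1a) /* illustrative OUI check */
                            return b;
                    j++;
            }
            return NULL;
    }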
*/ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { int i = 0; - struct detailed_timing *timing; - struct detailed_non_pixel *data; - struct detailed_data_monitor_range *range; struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_con_state = NULL; struct dc_sink *sink; - struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; + const struct edid *edid; bool freesync_capable = false; enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; @@ -12149,13 +12213,13 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, amdgpu_dm_connector->dc_sink : amdgpu_dm_connector->dc_em_sink; - if (!edid || !sink) { + drm_edid_connector_update(connector, drm_edid); + + if (!drm_edid || !sink) { dm_con_state = to_dm_connector_state(connector->state); amdgpu_dm_connector->min_vfreq = 0; amdgpu_dm_connector->max_vfreq = 0; - connector->display_info.monitor_range.min_vfreq = 0; - connector->display_info.monitor_range.max_vfreq = 0; freesync_capable = false; goto update; @@ -12166,6 +12230,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (!adev->dm.freesync_module) goto update; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() + /* Some eDP panels only have the refresh rate range info in DisplayID */ if ((connector->display_info.monitor_range.min_vfreq == 0 || connector->display_info.monitor_range.max_vfreq == 0)) @@ -12173,67 +12239,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || sink->sink_signal == SIGNAL_TYPE_EDP)) { - bool edid_check_required = false; - - if (amdgpu_dm_connector->dc_link && - amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) { - if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) { - amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; - amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; - if (amdgpu_dm_connector->max_vfreq - - amdgpu_dm_connector->min_vfreq > 10) - freesync_capable = true; - } else { - edid_check_required = edid->version > 1 || - (edid->version == 1 && - edid->revision > 1); - } - } - - if (edid_check_required) { - for (i = 0; i < 4; i++) { - - timing = &edid->detailed_timings[i]; - data = &timing->data.other_data; - range = &data->data.range; - /* - * Check if monitor has continuous frequency mode - */ - if (data->type != EDID_DETAIL_MONITOR_RANGE) - continue; - /* - * Check for flag range limits only. If flag == 1 then - * no additional timing information provided. 
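With drm_edid_connector_update() now populating connector->display_info from the EDID, the hand-rolled walk over detailed timings being deleted here becomes redundant: the driver simply reads the core-parsed monitor range and applies its existing "span greater than 10 Hz" heuristic, as the replacement code that follows shows. In essence:

    /* Illustrative: FreeSync capability test against the core-parsed range. */
    static bool freesync_range_usable(const struct drm_connector *connector)
    {
            int min = connector->display_info.monitor_range.min_vfreq;
            int max = connector->display_info.monitor_range.max_vfreq;

            return (max - min) > 10;        /* need a usable VRR window */
    }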
- * Default GTF, GTF Secondary curve and CVT are not - * supported - */ - if (range->flags != 1) - continue; - - connector->display_info.monitor_range.min_vfreq = range->min_vfreq; - connector->display_info.monitor_range.max_vfreq = range->max_vfreq; - - if (edid->revision >= 4) { - if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ) - connector->display_info.monitor_range.min_vfreq += 255; - if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ) - connector->display_info.monitor_range.max_vfreq += 255; - } - - amdgpu_dm_connector->min_vfreq = - connector->display_info.monitor_range.min_vfreq; - amdgpu_dm_connector->max_vfreq = - connector->display_info.monitor_range.max_vfreq; - - break; - } - - if (amdgpu_dm_connector->max_vfreq - - amdgpu_dm_connector->min_vfreq > 10) { - - freesync_capable = true; - } - } + amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; + amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) + freesync_capable = true; parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (vsdb_info.replay_mode) { @@ -12242,12 +12251,9 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP; } - } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { + } else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (i >= 0 && vsdb_info.freesync_supported) { - timing = &edid->detailed_timings[i]; - data = &timing->data.other_data; - amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 15d4690c74d6..6464a8378387 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -673,7 +673,7 @@ struct amdgpu_dm_connector { /* we need to mind the EDID between detect and get modes due to analog/digital/tvencoder */ - struct edid *edid; + const struct drm_edid *drm_edid; /* shared with amdgpu */ struct amdgpu_hpd hpd; @@ -727,7 +727,7 @@ struct amdgpu_dm_connector { /* Cached display modes */ struct drm_display_mode freesync_vid_base; - int psr_skip_count; + int sr_skip_count; bool disallow_edp_enter_psr; /* Record progress status of mst*/ @@ -951,7 +951,7 @@ void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector); void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, - struct edid *edid); + const struct drm_edid *drm_edid); void amdgpu_dm_trigger_timing_sync(struct drm_device *dev); @@ -1004,6 +1004,9 @@ void *dm_allocate_gpu_mem(struct amdgpu_device *adev, enum dc_gpu_mem_alloc_type type, size_t size, long long *addr); +void dm_free_gpu_mem(struct amdgpu_device *adev, + enum dc_gpu_mem_alloc_type type, + void *addr); bool amdgpu_dm_is_headless(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index a2cf2c066a76..64a041c2af05 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -35,8 +35,8 @@ #include "amdgpu_dm_trace.h" #include "amdgpu_dm_debugfs.h" 
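The retuned HPD constants that follow trade a rare, very short sample for a more frequent, much longer one. For scale:

    /*
     * Illustrative duty-cycle arithmetic for the change below:
     *   old: 1000 us sampled every 5000000 us   -> 0.02% of the time
     *   new: 100000 us sampled every 2000000 us -> 5% of the time
     * i.e. the HPD line is observed 250x more per unit time.
     */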
-#define HPD_DETECTION_PERIOD_uS 5000000 -#define HPD_DETECTION_TIME_uS 1000 +#define HPD_DETECTION_PERIOD_uS 2000000 +#define HPD_DETECTION_TIME_uS 100000 void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc) { @@ -154,6 +154,7 @@ static void amdgpu_dm_crtc_set_panel_sr_feature( amdgpu_dm_psr_enable(vblank_work->stream); if (dm->idle_workqueue && + (dm->dc->config.disable_ips == DMUB_IPS_ENABLE) && dm->dc->idle_optimizations_allowed && dm->idle_workqueue->enable && !dm->idle_workqueue->running) @@ -251,10 +252,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) else if (dm->active_vblank_irq_count) dm->active_vblank_irq_count--; - if (dm->active_vblank_irq_count > 0) { - DRM_DEBUG_KMS("Allow idle optimizations (MALL): false\n"); + if (dm->active_vblank_irq_count > 0) dc_allow_idle_optimizations(dm->dc, false); - } /* * Control PSR based on vblank requirements from OS @@ -266,17 +265,14 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) * where the SU region is the full hactive*vactive region. See * fill_dc_dirty_rects(). */ - if (vblank_work->stream && vblank_work->stream->link) { + if (vblank_work->stream && vblank_work->stream->link && vblank_work->acrtc) { amdgpu_dm_crtc_set_panel_sr_feature( vblank_work, vblank_work->enable, - vblank_work->acrtc->dm_irq_params.allow_psr_entry || - vblank_work->stream->link->replay_settings.replay_feature_enabled); + vblank_work->acrtc->dm_irq_params.allow_sr_entry); } - if (dm->active_vblank_irq_count == 0) { - DRM_DEBUG_KMS("Allow idle optimizations (MALL): true\n"); + if (dm->active_vblank_irq_count == 0) dc_allow_idle_optimizations(dm->dc, true); - } mutex_unlock(&dm->dc_lock); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index db56b0aa5454..6a97bb2d9160 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1529,7 +1529,6 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1543,8 +1542,6 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -1558,10 +1555,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_clock_en); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -1719,7 +1715,6 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1733,8 +1728,6 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -1748,10 +1741,9 @@ static ssize_t 
dp_dsc_slice_width_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_width); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -1907,7 +1899,6 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1921,8 +1912,6 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -1936,10 +1925,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_height); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2091,7 +2079,6 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2105,8 +2092,6 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2120,10 +2105,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_bits_per_pixel); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2270,7 +2254,6 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2284,8 +2267,6 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2299,10 +2280,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_pic_width); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2328,7 +2308,6 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2342,8 +2321,6 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = 
&aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2357,10 +2334,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_pic_height); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2401,7 +2377,6 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2415,8 +2390,6 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2430,10 +2403,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_chunk_size); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2474,7 +2446,6 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2488,8 +2459,6 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2503,10 +2472,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_bpg_offset); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 069e0195e50a..6cbbb71d752b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -23,6 +23,8 @@ * */ +#include <acpi/video.h> + #include <linux/string.h> #include <linux/acpi.h> #include <linux/i2c.h> @@ -44,6 +46,7 @@ #include "dm_helpers.h" #include "ddc_service_types.h" +#include "clk_mgr.h" static u32 edid_extract_panel_id(struct edid *edid) { @@ -641,6 +644,8 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, // write rc data memmove(rc_data, data, length); ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data)); + if (ret < 0) + goto err; } // write rc offset @@ -649,20 +654,21 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF; rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset)); + if (ret < 0) + goto err; // write rc length rc_length[0] = (unsigned char) length & 0xFF; rc_length[1] = (unsigned char) (length >> 8) & 0xFF; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length)); + 
if (ret < 0) + goto err; // write rc cmd rc_cmd = cmd | 0x80; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); - - if (ret < 0) { - DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); - return false; - } + if (ret < 0) + goto err; // poll until active is 0 for (i = 0; i < 10; i++) { @@ -685,6 +691,10 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, drm_dbg_dp(aux->drm_dev, "success = %d\n", success); return success; + +err: + DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); + return false; } static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) @@ -891,6 +901,67 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link) return dp_sink_present; } +static int +dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len) +{ + struct drm_connector *connector = data; + struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev); + unsigned char start = block * EDID_LENGTH; + struct edid *edid; + int r; + + if (!acpidev) + return -ENODEV; + + /* fetch the entire edid from BIOS */ + r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, (void *)&edid); + if (r < 0) { + drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r); + return r; + } + if (len > r || start > r || start + len > r) { + r = -EINVAL; + goto cleanup; + } + + /* sanity check */ + if (edid->revision < 4 || !(edid->input & DRM_EDID_INPUT_DIGITAL) || + (edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_UNDEF) { + r = -EINVAL; + goto cleanup; + } + + memcpy(buf, (void *)edid + start, len); + r = 0; + +cleanup: + kfree(edid); + + return r; +} + +static const struct drm_edid * +dm_helpers_read_acpi_edid(struct amdgpu_dm_connector *aconnector) +{ + struct drm_connector *connector = &aconnector->base; + + if (amdgpu_dc_debug_mask & DC_DISABLE_ACPI_EDID) + return NULL; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_LVDS: + case DRM_MODE_CONNECTOR_eDP: + break; + default: + return NULL; + } + + if (connector->force == DRM_FORCE_OFF) + return NULL; + + return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector); +} + enum dc_edid_status dm_helpers_read_local_edid( struct dc_context *ctx, struct dc_link *link, @@ -901,7 +972,8 @@ enum dc_edid_status dm_helpers_read_local_edid( struct i2c_adapter *ddc; int retry = 3; enum dc_edid_status edid_status; - struct edid *edid; + const struct drm_edid *drm_edid; + const struct edid *edid; if (link->aux_mode) ddc = &aconnector->dm_dp_aux.aux.ddc; @@ -912,26 +984,31 @@ enum dc_edid_status dm_helpers_read_local_edid( * do check sum and retry to make sure read correct edid. 
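dm_helpers_probe_acpi_edid() above is a block-read callback for drm_edid_read_custom(), which asks for one 128-byte EDID block at a time; a callback only has to bounds-check and copy out of whatever backing store it owns. A minimal callback over a cached blob (the struct and names are illustrative, not from this patch):

    /* Illustrative drm_edid_read_custom() callback over a cached EDID blob. */
    struct cached_edid {
            const u8 *blob;
            size_t size;
    };

    static int read_cached_edid_block(void *data, u8 *buf,
                                      unsigned int block, size_t len)
    {
            struct cached_edid *c = data;
            size_t start = (size_t)block * 128 /* EDID_LENGTH */;

            if (start + len > c->size)
                    return -EINVAL;         /* mirrors the ACPI path's checks */
            memcpy(buf, c->blob + start, len);
            return 0;                       /* non-zero aborts the read */
    }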
*/ do { - - edid = drm_get_edid(&aconnector->base, ddc); + drm_edid = dm_helpers_read_acpi_edid(aconnector); + if (drm_edid) + drm_info(connector->dev, "Using ACPI provided EDID for %s\n", connector->name); + else + drm_edid = drm_edid_read_ddc(connector, ddc); + drm_edid_connector_update(connector, drm_edid); /* DP Compliance Test 4.2.2.6 */ if (link->aux_mode && connector->edid_corrupt) drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum); - if (!edid && connector->edid_corrupt) { + if (!drm_edid && connector->edid_corrupt) { connector->edid_corrupt = false; return EDID_BAD_CHECKSUM; } - if (!edid) + if (!drm_edid) return EDID_NO_RESPONSE; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1); memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length); /* We don't need the original edid anymore */ - kfree(edid); + drm_edid_free(drm_edid); edid_status = dm_helpers_parse_edid_caps( link, @@ -1054,17 +1131,8 @@ void dm_helpers_free_gpu_mem( void *pvMem) { struct amdgpu_device *adev = ctx->driver_context; - struct dal_allocation *da; - - /* walk the da list in DM */ - list_for_each_entry(da, &adev->dm.da_list, list) { - if (pvMem == da->cpu_ptr) { - amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); - list_del(&da->list); - kfree(da); - break; - } - } + + dm_free_gpu_mem(adev, type, pvMem); } bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable) @@ -1121,6 +1189,8 @@ bool dm_helpers_dp_handle_test_pattern_request( struct pipe_ctx *pipe_ctx = NULL; struct amdgpu_dm_connector *aconnector = link->priv; struct drm_device *dev = aconnector->base.dev; + struct dc_state *dc_state = ctx->dc->current_state; + struct clk_mgr *clk_mgr = ctx->dc->clk_mgr; int i; for (i = 0; i < MAX_PIPES; i++) { @@ -1221,6 +1291,16 @@ bool dm_helpers_dp_handle_test_pattern_request( pipe_ctx->stream->test_pattern.type = test_pattern; pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space; + /* Temp W/A for compliance test failure */ + dc_state->bw_ctx.bw.dcn.clk.p_state_change_support = false; + dc_state->bw_ctx.bw.dcn.clk.dramclk_khz = clk_mgr->dc_mode_softmax_enabled ? + clk_mgr->bw_params->dc_mode_softmax_memclk : clk_mgr->bw_params->max_memclk_mhz; + dc_state->bw_ctx.bw.dcn.clk.idle_dramclk_khz = dc_state->bw_ctx.bw.dcn.clk.dramclk_khz; + ctx->dc->clk_mgr->funcs->update_clocks( + ctx->dc->clk_mgr, + dc_state, + false); + dc_link_dp_set_test_pattern( (struct dc_link *) link, test_pattern, @@ -1301,4 +1381,4 @@ bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream { // TODO return false; -}
\ No newline at end of file +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h index 5c9303241aeb..6a7ecc1e4602 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h @@ -33,7 +33,7 @@ struct dm_irq_params { struct mod_vrr_params vrr_params; struct dc_stream_state *stream; int active_planes; - bool allow_psr_entry; + bool allow_sr_entry; struct mod_freesync_config freesync_config; #ifdef CONFIG_DEBUG_FS diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index a08e8a0b696c..6e4359490613 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -129,7 +129,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) dc_sink_release(aconnector->dc_sink); } - kfree(aconnector->edid); + drm_edid_free(aconnector->drm_edid); drm_connector_cleanup(connector); drm_dp_mst_put_port_malloc(aconnector->mst_output_port); @@ -182,7 +182,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) dc_sink_release(dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + aconnector->drm_edid = NULL; aconnector->dsc_aux = NULL; port->passthrough_aux = NULL; } @@ -302,16 +302,18 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (!aconnector) return drm_add_edid_modes(connector, NULL); - if (!aconnector->edid) { - struct edid *edid; + if (!aconnector->drm_edid) { + const struct drm_edid *drm_edid; - edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port); + drm_edid = drm_dp_mst_edid_read(connector, + &aconnector->mst_root->mst_mgr, + aconnector->mst_output_port); - if (!edid) { + if (!drm_edid) { amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID, false); - drm_connector_update_edid_property( + drm_edid_connector_update( &aconnector->base, NULL); @@ -345,7 +347,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return ret; } - aconnector->edid = edid; + aconnector->drm_edid = drm_edid; amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID, true); } @@ -360,10 +362,13 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; + const struct edid *edid; + + edid = drm_edid_raw(aconnector->drm_edid); // FIXME: Get rid of drm_edid_raw() dc_sink = dc_link_add_remote_sink( aconnector->dc_link, - (uint8_t *)aconnector->edid, - (aconnector->edid->extensions + 1) * EDID_LENGTH, + (uint8_t *)edid, + (edid->extensions + 1) * EDID_LENGTH, &init_params); if (!dc_sink) { @@ -405,7 +410,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps( - connector, aconnector->edid); + connector, aconnector->drm_edid); #if defined(CONFIG_DRM_AMD_DC_FP) if (!validate_dsc_caps_on_connector(aconnector)) @@ -419,10 +424,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) } } - drm_connector_update_edid_property( - &aconnector->base, aconnector->edid); + drm_edid_connector_update(&aconnector->base, aconnector->drm_edid); - ret = drm_add_edid_modes(connector, aconnector->edid); + ret = drm_edid_connector_add_modes(connector); return ret; } @@ -500,7 +504,7 
@@ dm_dp_mst_detect(struct drm_connector *connector, dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + aconnector->drm_edid = NULL; aconnector->dsc_aux = NULL; port->passthrough_aux = NULL; @@ -1120,6 +1124,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, int i, k, ret; bool debugfs_overwrite = false; uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); + struct drm_connector_state *new_conn_state; memset(params, 0, sizeof(params)); @@ -1127,7 +1132,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return PTR_ERR(mst_state); /* Set up params */ - DRM_DEBUG_DRIVER("%s: MST_DSC Set up params for %d streams\n", __func__, dc_state->stream_count); + DRM_DEBUG_DRIVER("%s: MST_DSC Try to set up params from %d streams\n", __func__, dc_state->stream_count); for (i = 0; i < dc_state->stream_count; i++) { struct dc_dsc_policy dsc_policy = {0}; @@ -1143,6 +1148,14 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (!aconnector->mst_output_port) continue; + new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); + + if (!new_conn_state) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC Skip the stream 0x%p with invalid new_conn_state\n", + __func__, __LINE__, stream); + continue; + } + stream->timing.flags.DSC = 0; params[count].timing = &stream->timing; @@ -1175,6 +1188,8 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, count++; } + DRM_DEBUG_DRIVER("%s: MST_DSC Params set up for %d streams\n", __func__, count); + if (count == 0) { ASSERT(0); return 0; @@ -1302,7 +1317,7 @@ static bool is_dsc_need_re_compute( continue; aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; - if (!aconnector || !aconnector->dsc_aux) + if (!aconnector) continue; stream_on_link[new_stream_on_link_num] = aconnector; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 0d8498ab9b23..c9a6de110b74 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -3122,7 +3122,7 @@ static enum bp_result bios_parser_get_vram_info( struct dc_vram_info *info) { struct bios_parser *bp = BP_FROM_DCB(dcb); - static enum bp_result result = BP_RESULT_BADBIOSTABLE; + enum bp_result result = BP_RESULT_BADBIOSTABLE; struct atom_common_table_header *header; struct atom_data_revision revision; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c index adc710fe4a45..8d2cf95ae739 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c @@ -78,10 +78,3 @@ void bios_set_scratch_critical_state( uint32_t critial_state = state ? 
1 : 0; REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critial_state); } - -uint32_t bios_get_vga_enabled_displays( - struct dc_bios *bios) -{ - return REG_READ(BIOS_SCRATCH_3) & 0XFFFF; -} - diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h index e1b4a40a353d..ab162f2fe577 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h @@ -34,7 +34,6 @@ uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset, bool bios_is_accelerated_mode(struct dc_bios *bios); void bios_set_scratch_acc_mode_change(struct dc_bios *bios, uint32_t state); void bios_set_scratch_critical_state(struct dc_bios *bios, bool state); -uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios); #define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type))) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index e93df3d6222e..bc123f1884da 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -50,12 +50,13 @@ #include "link.h" #include "logger_types.h" + + +#include "yellow_carp_offset.h" #undef DC_LOGGER #define DC_LOGGER \ clk_mgr->base.base.ctx->logger -#include "yellow_carp_offset.h" - #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 29eff386505a..91d872d6d392 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -53,9 +53,6 @@ #include "logger_types.h" -#undef DC_LOGGER -#define DC_LOGGER \ - clk_mgr->base.base.ctx->logger #define MAX_INSTANCE 7 @@ -77,6 +74,9 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } }, { { 0x0001B400, 0x0242E000, 0, 0, 0, 0, 0, 0 } } } }; +#undef DC_LOGGER +#define DC_LOGGER \ + clk_mgr->base.base.ctx->logger #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index b46a3afe48ca..b77333817f18 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -55,6 +55,7 @@ #define DC_LOGGER \ clk_mgr->base.base.ctx->logger + #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 @@ -132,6 +133,8 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state * for (i = 0; i < dc->res_pool->pipe_count; ++i) { struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dccg *dccg = clk_mgr_internal->dccg; struct pipe_ctx *pipe = safe_to_lower ? 
&context->res_ctx.pipe_ctx[i] : &dc->current_state->res_ctx.pipe_ctx[i]; @@ -148,8 +151,21 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state * new_pipe->stream_res.stream_enc && new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled && new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc); - if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || - !pipe->stream->link_enc) && !stream_changed_otg_dig_on) { + + bool has_active_hpo = false; + + if (old_pipe->stream && new_pipe->stream && old_pipe->stream == new_pipe->stream) { + has_active_hpo = dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(old_pipe) && + dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(new_pipe); + + } + + + if (!has_active_hpo && !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe) && + (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || + !pipe->stream->link_enc) && !stream_changed_otg_dig_on)) { + + /* This w/a should not trigger when we have a dig active */ if (disable) { if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) @@ -257,11 +273,11 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); uint32_t host_router_bw_kbps[MAX_HOST_ROUTERS_NUM] = { 0 }; int i; - for (i = 0; i < context->stream_count; ++i) { const struct dc_stream_state *stream = context->streams[i]; const struct dc_link *link = stream->link; - uint8_t lowest_dpia_index = 0, hr_index = 0; + uint8_t lowest_dpia_index = 0; + unsigned int hr_index = 0; if (!link) continue; @@ -271,6 +287,8 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_ continue; hr_index = (link->link_index - lowest_dpia_index) / 2; + if (hr_index >= MAX_HOST_ROUTERS_NUM) + continue; host_router_bw_kbps[hr_index] += dc_bandwidth_in_kbps_from_timing( &stream->timing, dc_link_get_highest_encoding_format(link)); } @@ -975,11 +993,8 @@ static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base) static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - bool ips_supported = true; - - ips_supported = dcn35_smu_get_ips_supported(clk_mgr) ? true : false; - return ips_supported; + return dcn35_smu_get_ips_supported(clk_mgr) ? true : false; } static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 5c39390ecbd5..49fe7dcf9372 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -621,8 +621,8 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream, * dc_stream_configure_crc() - Configure CRC capture for the given stream. * @dc: DC Object * @stream: The stream to configure CRC on. - * @enable: Enable CRC if true, disable otherwise. * @crc_window: CRC window (x/y start/end) information + * @enable: Enable CRC if true, disable otherwise. * @continuous: Capture CRC on every frame if true. Otherwise, only capture * once. 
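The hr_index change in dcn35_notify_host_router_bw() above is straightforward array hardening: the index is derived from link numbering, so it is range-checked before accumulating into the fixed-size per-router table. Reduced to the pattern:

    /* Illustrative: derive-then-check before indexing the per-router table. */
    static void add_host_router_bw(unsigned int *bw_kbps,
                                   unsigned int nr_routers,
                                   unsigned int link_index,
                                   unsigned int lowest_dpia_index,
                                   unsigned int bw)
    {
            unsigned int hr_index = (link_index - lowest_dpia_index) / 2;

            if (hr_index >= nr_routers)
                    return;         /* unexpected topology; skip the link */
            bw_kbps[hr_index] += bw;
    }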
* @@ -1157,6 +1157,8 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR) + get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else { if (dc->ctx->dce_version < DCN_VERSION_2_0) color_space_to_black_color( @@ -1233,16 +1235,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) */ if (is_phantom) { if (tg->funcs->enable_crtc) { - int main_pipe_width = 0, main_pipe_height = 0; - struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream); - - if (old_paired_stream) { - main_pipe_width = old_paired_stream->dst.width; - main_pipe_height = old_paired_stream->dst.height; - } - - if (dc->hwss.blank_phantom) - dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); + if (dc->hwseq->funcs.blank_pixel_data) + dc->hwseq->funcs.blank_pixel_data(dc, pipe, true); tg->funcs->enable_crtc(tg); } } @@ -1437,6 +1431,7 @@ void dc_hardware_init(struct dc *dc) detect_edp_presence(dc); if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) dc->hwss.init_hw(dc); + dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); } void dc_init_callbacks(struct dc *dc, @@ -1876,6 +1871,41 @@ void dc_z10_save_init(struct dc *dc) dc->hwss.z10_save_init(dc); } +/* Set a pipe unlock order based on the change in DET allocation and stores it in dc scratch memory + * Prevents over allocation of DET during unlock process + * e.g. 2 pipe config with different streams with a max of 20 DET segments + * Before: After: + * - Pipe0: 10 DET segments - Pipe0: 12 DET segments + * - Pipe1: 10 DET segments - Pipe1: 8 DET segments + * If Pipe0 gets updated first, 22 DET segments will be allocated + */ +static void determine_pipe_unlock_order(struct dc *dc, struct dc_state *context) +{ + unsigned int i = 0; + struct pipe_ctx *pipe = NULL; + struct timing_generator *tg = NULL; + + if (!dc->config.set_pipe_unlock_order) + return; + + memset(dc->scratch.pipes_to_unlock_first, 0, sizeof(dc->scratch.pipes_to_unlock_first)); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + tg = pipe->stream_res.tg; + + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !tg->funcs->is_tg_enabled(tg) || + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { + continue; + } + + if (resource_calculate_det_for_stream(context, pipe) < + resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i])) { + dc->scratch.pipes_to_unlock_first[i] = true; + } + } +} + /** * dc_commit_state_no_check - Apply context to the hardware * @@ -1974,6 +2004,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed; } + determine_pipe_unlock_order(dc, context); /* Program all planes within new context*/ if (dc->res_pool->funcs->prepare_mcache_programming) dc->res_pool->funcs->prepare_mcache_programming(dc, context); @@ -2156,6 +2187,14 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params context->power_source = params->power_source; res = dc_validate_with_context(dc, set, params->stream_count, context, false); + + /* + * Only update 
link encoder to stream assignment after bandwidth validation passed. + */ + if (res == DC_OK && dc->res_pool->funcs->link_encs_assign) + dc->res_pool->funcs->link_encs_assign( + dc, context, context->streams, context->stream_count); + if (res != DC_OK) { BREAK_TO_DEBUGGER(); goto fail; @@ -2477,41 +2516,35 @@ static enum surface_update_type get_scaling_info_update_type( if (!u->scaling_info) return UPDATE_TYPE_FAST; - if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width + if (u->scaling_info->src_rect.width != u->surface->src_rect.width + || u->scaling_info->src_rect.height != u->surface->src_rect.height + || u->scaling_info->dst_rect.width != u->surface->dst_rect.width || u->scaling_info->dst_rect.height != u->surface->dst_rect.height + || u->scaling_info->clip_rect.width != u->surface->clip_rect.width + || u->scaling_info->clip_rect.height != u->surface->clip_rect.height || u->scaling_info->scaling_quality.integer_scaling != - u->surface->scaling_quality.integer_scaling - ) { + u->surface->scaling_quality.integer_scaling) { update_flags->bits.scaling_change = 1; + if (u->scaling_info->src_rect.width > u->surface->src_rect.width + || u->scaling_info->src_rect.height > u->surface->src_rect.height) + /* Making src rect bigger requires a bandwidth change */ + update_flags->bits.clock_change = 1; + if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) && (u->scaling_info->dst_rect.width < u->surface->src_rect.width || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) /* Making dst rect smaller requires a bandwidth change */ update_flags->bits.bandwidth_change = 1; - } - - if (u->scaling_info->src_rect.width != u->surface->src_rect.width - || u->scaling_info->src_rect.height != u->surface->src_rect.height) { - update_flags->bits.scaling_change = 1; - if (u->scaling_info->src_rect.width > u->surface->src_rect.width - || u->scaling_info->src_rect.height > u->surface->src_rect.height) - /* Making src rect bigger requires a bandwidth change */ - update_flags->bits.clock_change = 1; + if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width && + (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || + u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) + /* Changing clip size of a large surface may result in MPC slice count change */ + update_flags->bits.bandwidth_change = 1; } - if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width && - (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || - u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) - /* Changing clip size of a large surface may result in MPC slice count change */ - update_flags->bits.bandwidth_change = 1; - - if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width || - u->scaling_info->clip_rect.height != u->surface->clip_rect.height) - update_flags->bits.clip_size_change = 1; - if (u->scaling_info->src_rect.x != u->surface->src_rect.x || u->scaling_info->src_rect.y != u->surface->src_rect.y || u->scaling_info->clip_rect.x != u->surface->clip_rect.x @@ -2520,13 +2553,13 @@ static enum surface_update_type get_scaling_info_update_type( || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) update_flags->bits.position_change = 1; + /* process every update flag before returning */ if (update_flags->bits.clock_change || update_flags->bits.bandwidth_change || update_flags->bits.scaling_change) return UPDATE_TYPE_FULL; - if 
(update_flags->bits.position_change || - update_flags->bits.clip_size_change) + if (update_flags->bits.position_change) return UPDATE_TYPE_MED; return UPDATE_TYPE_FAST; @@ -2617,7 +2650,8 @@ static enum surface_update_type det_surface_update(const struct dc *dc, elevate_update_type(&overall_type, type); } - if (update_flags->bits.lut_3d) { + if (update_flags->bits.lut_3d && + u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { type = UPDATE_TYPE_FULL; elevate_update_type(&overall_type, type); } @@ -2637,6 +2671,29 @@ static enum surface_update_type det_surface_update(const struct dc *dc, return overall_type; } +/* May need to flip the desktop plane in cases where MPO plane receives a flip but desktop plane doesn't + * while both planes are flip_immediate + */ +static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_update *updates, int surface_count) +{ + bool has_flip_immediate_plane = false; + int i; + + for (i = 0; i < surface_count; i++) { + if (updates[i].surface->flip_immediate) { + has_flip_immediate_plane = true; + break; + } + } + + if (has_flip_immediate_plane && surface_count > 1) { + for (i = 0; i < surface_count; i++) { + if (updates[i].surface->flip_immediate) + updates[i].surface->update_flags.bits.addr_update = 1; + } + } +} + static enum surface_update_type check_update_surfaces_for_stream( struct dc *dc, struct dc_surface_update *updates, @@ -2699,6 +2756,9 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->scaler_sharpener_update) su_flags->bits.scaler_sharpener = 1; + if (stream_update->sharpening_required) + su_flags->bits.sharpening_required = 1; + if (su_flags->raw != 0) overall_type = UPDATE_TYPE_FULL; @@ -2870,10 +2930,20 @@ static void copy_surface_update_to_plane( sizeof(struct dc_transfer_func_distributed_points)); } - if (srf_update->func_shaper) + if (srf_update->cm2_params) { + surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting; + surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable; + surface->mcm_luts = srf_update->cm2_params->cm2_luts; + } + + if (srf_update->func_shaper) { memcpy(&surface->in_shaper_func, srf_update->func_shaper, sizeof(surface->in_shaper_func)); + if (surface->mcm_shaper_3dlut_setting >= DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER) + surface->mcm_luts.shaper = &surface->in_shaper_func; + } + if (srf_update->lut3d_func) memcpy(&surface->lut3d_func, srf_update->lut3d_func, sizeof(surface->lut3d_func)); @@ -2886,10 +2956,17 @@ static void copy_surface_update_to_plane( surface->sdr_white_level_nits = srf_update->sdr_white_level_nits; - if (srf_update->blend_tf) + if (srf_update->blend_tf) { memcpy(&surface->blend_tf, srf_update->blend_tf, sizeof(surface->blend_tf)); + if (surface->mcm_lut1d_enable) + surface->mcm_luts.lut1d_func = &surface->blend_tf; + } + + if (srf_update->cm2_params || srf_update->blend_tf) + surface->lut_bank_a = !surface->lut_bank_a; + if (srf_update->input_csc_color_matrix) surface->input_csc_color_matrix = *srf_update->input_csc_color_matrix; @@ -2901,11 +2978,7 @@ static void copy_surface_update_to_plane( if (srf_update->gamut_remap_matrix) surface->gamut_remap_matrix = *srf_update->gamut_remap_matrix; - if (srf_update->cm2_params) { - surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting; - surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable; - surface->mcm_luts = 
srf_update->cm2_params->cm2_luts; - } + if (srf_update->cursor_csc_color_matrix) surface->cursor_csc_color_matrix = *srf_update->cursor_csc_color_matrix; @@ -3037,6 +3110,8 @@ static void copy_stream_update_to_stream(struct dc *dc, } if (update->scaler_sharpener_update) stream->scaler_sharpener_update = *update->scaler_sharpener_update; + if (update->sharpening_required) + stream->sharpening_required = *update->sharpening_required; } static void backup_planes_and_stream_state( @@ -3066,7 +3141,10 @@ static void restore_planes_and_stream_state( return; for (i = 0; i < status->plane_count; i++) { + /* refcount will always be valid, restore everything else */ + struct kref refcount = status->plane_states[i]->refcount; *status->plane_states[i] = scratch->plane_states[i]; + status->plane_states[i]->refcount = refcount; } *stream = scratch->stream_state; } @@ -3153,6 +3231,11 @@ static bool update_planes_and_stream_state(struct dc *dc, context = dc->current_state; update_type = dc_check_update_surfaces_for_stream( dc, srf_updates, surface_count, stream_update, stream_status); + /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. + * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip + * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. + */ + force_immediate_gsl_plane_flip(dc, srf_updates, surface_count); if (update_type == UPDATE_TYPE_FULL) backup_planes_and_stream_state(&dc->scratch.current_state, stream); @@ -3225,8 +3308,7 @@ static bool update_planes_and_stream_state(struct dc *dc, if (update_type != UPDATE_TYPE_MED) continue; - if (surface->update_flags.bits.clip_size_change || - surface->update_flags.bits.position_change) { + if (surface->update_flags.bits.position_change) { for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; @@ -3625,6 +3707,10 @@ static void commit_planes_for_stream_fast(struct dc *dc, struct pipe_ctx *top_pipe_to_program = NULL; struct dc_stream_status *stream_status = NULL; bool should_offload_fams2_flip = false; + bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); + + if (should_lock_all_pipes) + determine_pipe_unlock_order(dc, context); if (dc->debug.fams2_config.bits.enable && dc->debug.fams2_config.bits.enable_offload_flip && @@ -3677,13 +3763,14 @@ static void commit_planes_for_stream_fast(struct dc *dc, if (!pipe_ctx->plane_state) continue; - if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) + if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) continue; + pipe_ctx->plane_state->triplebuffer_flips = false; if (update_type == UPDATE_TYPE_FAST && - dc->hwss.program_triplebuffer != NULL && - !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { - /*triple buffer for VUpdate only*/ + dc->hwss.program_triplebuffer != NULL && + !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { + /*triple buffer for VUpdate only*/ pipe_ctx->plane_state->triplebuffer_flips = true; } } @@ -3742,6 +3829,8 @@ static void commit_planes_for_stream(struct dc *dc, bool subvp_curr_use = false; uint8_t current_stream_mask = 0; + if (should_lock_all_pipes) + determine_pipe_unlock_order(dc, context); // Once we apply the new subvp context to hardware it won't be in the // dc->current_state anymore, so we have to cache it before we apply // the new SubVP context @@ -3749,7 +3838,7 @@ static void 
commit_planes_for_stream(struct dc *dc, dc_exit_ips_for_hw_access(dc); dc_z10_restore(dc); - if (update_type == UPDATE_TYPE_FULL) + if (update_type == UPDATE_TYPE_FULL && dc->optimized_required) hwss_process_outstanding_hw_updates(dc, dc->current_state); for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -3776,6 +3865,9 @@ static void commit_planes_for_stream(struct dc *dc, context_clock_trace(dc, context); } + if (update_type == UPDATE_TYPE_FULL) + hwss_wait_for_outstanding_hw_updates(dc, dc->current_state); + top_pipe_to_program = resource_get_otg_master_for_stream( &context->res_ctx, stream); @@ -3920,19 +4012,20 @@ static void commit_planes_for_stream(struct dc *dc, struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (!pipe_ctx->plane_state) continue; - if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) + if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) continue; pipe_ctx->plane_state->triplebuffer_flips = false; if (update_type == UPDATE_TYPE_FAST && - dc->hwss.program_triplebuffer != NULL && - !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { - /*triple buffer for VUpdate only*/ - pipe_ctx->plane_state->triplebuffer_flips = true; + dc->hwss.program_triplebuffer != NULL && + !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { + /*triple buffer for VUpdate only*/ + pipe_ctx->plane_state->triplebuffer_flips = true; } } if (update_type == UPDATE_TYPE_FULL) { /* force vsync flip when reconfiguring pipes to prevent underflow */ plane_state->flip_immediate = false; + plane_state->triplebuffer_flips = false; } } @@ -3953,7 +4046,6 @@ static void commit_planes_for_stream(struct dc *dc, continue; ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); - if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { /*turn off triple buffer for full update*/ dc->hwss.program_triplebuffer( @@ -4028,7 +4120,7 @@ static void commit_planes_for_stream(struct dc *dc, /*program triple buffer after lock based on flip type*/ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { - /*only enable triplebuffer for fast_update*/ + /*only enable triplebuffer for fast_update*/ dc->hwss.program_triplebuffer( dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); } @@ -4777,6 +4869,11 @@ static bool update_planes_and_stream_v1(struct dc *dc, update_type = dc_check_update_surfaces_for_stream( dc, srf_updates, surface_count, stream_update, stream_status); + /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. + * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip + * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. 
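Both copies of the triple-buffer decision above reduce to the same predicate: fast update, hardware support, no immediate flip, and the debug knob enabled; full updates clear the flag and force vsync flips instead. A sketch of that predicate on its own, with stand-in parameters for the fields the diff tests:

#include <stdbool.h>

enum update_type { UPDATE_TYPE_FAST, UPDATE_TYPE_MED, UPDATE_TYPE_FULL };

/* triple buffering is for VUpdate-synchronized fast flips only */
static bool want_triplebuffer(enum update_type type, bool hw_has_triplebuffer,
                              bool flip_immediate, bool debug_enable_tri_buf)
{
    return type == UPDATE_TYPE_FAST && hw_has_triplebuffer &&
           !flip_immediate && debug_enable_tri_buf;
}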
+ */ + force_immediate_gsl_plane_flip(dc, srf_updates, surface_count); if (update_type >= UPDATE_TYPE_FULL) { @@ -5065,11 +5162,26 @@ static bool update_planes_and_stream_v3(struct dc *dc, return true; } +static void clear_update_flags(struct dc_surface_update *srf_updates, + int surface_count, struct dc_stream_state *stream) +{ + int i; + + if (stream) + stream->update_flags.raw = 0; + + for (i = 0; i < surface_count; i++) + if (srf_updates[i].surface) + srf_updates[i].surface->update_flags.raw = 0; +} + bool dc_update_planes_and_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, struct dc_stream_update *stream_update) { + bool ret = false; + dc_exit_ips_for_hw_access(dc); /* * update planes and stream version 3 separates FULL and FAST updates @@ -5086,10 +5198,16 @@ bool dc_update_planes_and_stream(struct dc *dc, * features as they are now transparent to the new sequence. */ if (dc->ctx->dce_version >= DCN_VERSION_4_01) - return update_planes_and_stream_v3(dc, srf_updates, + ret = update_planes_and_stream_v3(dc, srf_updates, surface_count, stream, stream_update); - return update_planes_and_stream_v2(dc, srf_updates, + else + ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, stream, stream_update); + + if (ret) + clear_update_flags(srf_updates, surface_count, stream); + + return ret; } void dc_commit_updates_for_stream(struct dc *dc, @@ -5099,6 +5217,8 @@ void dc_commit_updates_for_stream(struct dc *dc, struct dc_stream_update *stream_update, struct dc_state *state) { + bool ret = false; + dc_exit_ips_for_hw_access(dc); /* TODO: Since change commit sequence can have a huge impact, * we decided to only enable it for DCN3x. However, as soon as @@ -5106,17 +5226,17 @@ void dc_commit_updates_for_stream(struct dc *dc, * the new sequence for all ASICs. */ if (dc->ctx->dce_version >= DCN_VERSION_4_01) { - update_planes_and_stream_v3(dc, srf_updates, surface_count, + ret = update_planes_and_stream_v3(dc, srf_updates, surface_count, stream, stream_update); - return; - } - if (dc->ctx->dce_version >= DCN_VERSION_3_2) { - update_planes_and_stream_v2(dc, srf_updates, surface_count, + } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) { + ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, stream, stream_update); - return; - } - update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, - stream_update, state); + } else + ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, + stream_update, state); + + if (ret) + clear_update_flags(srf_updates, surface_count, stream); } uint8_t dc_get_current_stream_count(struct dc *dc) @@ -5315,8 +5435,10 @@ bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips) void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name) { - if (dc->debug.disable_idle_power_optimizations) + if (dc->debug.disable_idle_power_optimizations) { + DC_LOG_DEBUG("%s: disabled\n", __func__); return; + } if (allow != dc->idle_optimizations_allowed) DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__, @@ -5333,8 +5455,10 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const return; if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL && - dc->hwss.apply_idle_power_optimizations(dc, allow)) + dc->hwss.apply_idle_power_optimizations(dc, allow)) { dc->idle_optimizations_allowed = allow; + DC_LOG_DEBUG("%s: %s\n", __func__, allow ? 
"enabled" : "disabled"); + } } void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name) @@ -5976,7 +6100,30 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state { struct dc_power_profile profile = { 0 }; - profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support; + profile.power_level = !context->bw_ctx.bw.dcn.clk.p_state_change_support; + if (!context->clk_mgr || !context->clk_mgr->ctx || !context->clk_mgr->ctx->dc) + return profile; + struct dc *dc = context->clk_mgr->ctx->dc; + if (dc->res_pool->funcs->get_power_profile) + profile.power_level = dc->res_pool->funcs->get_power_profile(context); return profile; } + +/* + ********************************************************************************** + * dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state + * + * Called when DM wants to log detile buffer size from dc_state + * + ********************************************************************************** + */ +unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context) +{ + struct dc *dc = context->clk_mgr->ctx->dc; + + if (dc->res_pool->funcs->get_det_buffer_size) + return dc->res_pool->funcs->get_det_buffer_size(context); + else + return 0; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 801cdbc8117d..af1ea5792560 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -46,11 +46,6 @@ DC_LOG_IF_TRACE(__VA_ARGS__); \ } while (0) -#define TIMING_TRACE(...) do {\ - if (dc->debug.timing_trace) \ - DC_LOG_SYNC(__VA_ARGS__); \ -} while (0) - #define CLOCK_TRACE(...) do {\ if (dc->debug.clock_trace) \ DC_LOG_BANDWIDTH_CALCS(__VA_ARGS__); \ @@ -306,43 +301,6 @@ void post_surface_trace(struct dc *dc) } -void context_timing_trace( - struct dc *dc, - struct resource_context *res_ctx) -{ - int i; - int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0}; - struct crtc_position position; - unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; - DC_LOGGER_INIT(dc->ctx->logger); - - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; - /* get_position() returns CRTC vertical/horizontal counter - * hence not applicable for underlay pipe - */ - if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) - continue; - - pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position); - h_pos[i] = position.horizontal_count; - v_pos[i] = position.vertical_count; - } - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; - - if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) - continue; - - TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n", - pipe_ctx->stream_res.tg->inst, - pipe_ctx->stream->timing.h_total, - pipe_ctx->stream->timing.v_total, - h_pos[i], v_pos[i]); - } -} - void context_clock_trace( struct dc *dc, struct dc_state *context) @@ -434,3 +392,43 @@ char *dc_status_to_str(enum dc_status status) return "Unexpected status error"; } + +char *dc_pixel_encoding_to_str(enum dc_pixel_encoding pixel_encoding) +{ + switch (pixel_encoding) { + case PIXEL_ENCODING_RGB: + return "RGB"; + case PIXEL_ENCODING_YCBCR422: + return "YUV422"; + case PIXEL_ENCODING_YCBCR444: + return "YUV444"; + case PIXEL_ENCODING_YCBCR420: + return "YUV420"; + default: + return "Unknown"; + } +} + +char 
*dc_color_depth_to_str(enum dc_color_depth color_depth) +{ + switch (color_depth) { + case COLOR_DEPTH_666: + return "6-bpc"; + case COLOR_DEPTH_888: + return "8-bpc"; + case COLOR_DEPTH_101010: + return "10-bpc"; + case COLOR_DEPTH_121212: + return "12-bpc"; + case COLOR_DEPTH_141414: + return "14-bpc"; + case COLOR_DEPTH_161616: + return "16-bpc"; + case COLOR_DEPTH_999: + return "9-bpc"; + case COLOR_DEPTH_111111: + return "11-bpc"; + default: + return "Unknown"; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 7ee2be8f82c4..252af83e34a5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -312,11 +312,11 @@ void get_mpctree_visual_confirm_color( { const struct tg_color pipe_colors[6] = { {MAX_TG_COLOR_VALUE, 0, 0}, /* red */ - {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE / 4, 0}, /* orange */ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* yellow */ {0, MAX_TG_COLOR_VALUE, 0}, /* green */ + {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* cyan */ {0, 0, MAX_TG_COLOR_VALUE}, /* blue */ - {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, /* purple */ + {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* magenta */ }; struct pipe_ctx *top_pipe = pipe_ctx; @@ -497,6 +497,23 @@ void get_mclk_switch_visual_confirm_color( } } +void get_cursor_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color) +{ + uint32_t color_value = MAX_TG_COLOR_VALUE; + + if (pipe_ctx->stream && pipe_ctx->stream->cursor_position.enable) { + color->color_r_cr = color_value; + color->color_g_y = 0; + color->color_b_cb = 0; + } else { + color->color_r_cr = 0; + color->color_g_y = 0; + color->color_b_cb = color_value; + } +} + void set_p_state_switch_method( struct dc *dc, struct dc_state *context, @@ -881,6 +898,9 @@ void hwss_setup_dpp(union block_sequence_params *params) struct dpp *dpp = pipe_ctx->plane_res.dpp; struct dc_plane_state *plane_state = pipe_ctx->plane_state; + if (!plane_state) + return; + if (dpp && dpp->funcs->dpp_setup) { // program the input csc dpp->funcs->dpp_setup(dpp, @@ -1071,8 +1091,13 @@ void hwss_wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_con if (!pipe_ctx->stream) continue; - if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) - pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); + /* For full update we must wait for all double buffer updates, not just DRR updates. This + * is particularly important for minimal transitions. 
Only check for OTG_MASTER pipes, + * as non-OTG Master pipes share the same OTG as the OTG master. + */ + if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && dc->hwss.wait_for_all_pending_updates) { + dc->hwss.wait_for_all_pending_updates(pipe_ctx); + } hubp = pipe_ctx->plane_res.hubp; if (!hubp) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index dfdfe22d9e85..457d60eeb486 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -430,11 +430,10 @@ bool dc_link_get_backlight_level_nits(struct dc_link *link, } bool dc_link_set_backlight_level(const struct dc_link *link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp) + struct set_backlight_level_params *backlight_level_params) { return link->dc->link_srv->edp_set_backlight_level(link, - backlight_pwm_u16_16, frame_ramp); + backlight_level_params); } bool dc_link_set_backlight_level_nits(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index c7599c40d4be..626f75b6ad00 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -765,25 +765,6 @@ static inline void get_vp_scan_direction( *flip_horz_scan_dir = !*flip_horz_scan_dir; } -/* - * This is a preliminary vp size calculation to allow us to check taps support. - * The result is completely overridden afterwards. - */ -static void calculate_viewport_size(struct pipe_ctx *pipe_ctx) -{ - struct scaler_data *data = &pipe_ctx->plane_res.scl_data; - - data->viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz, data->recout.width)); - data->viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert, data->recout.height)); - data->viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz_c, data->recout.width)); - data->viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert_c, data->recout.height)); - if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || - pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) { - swap(data->viewport.width, data->viewport.height); - swap(data->viewport_c.width, data->viewport_c.height); - } -} - static struct rect intersect_rec(const struct rect *r0, const struct rect *r1) { struct rect rec; @@ -1468,6 +1449,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) const struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; const struct rect odm_slice_src = resource_get_odm_slice_src_rect(pipe_ctx); + struct scaling_taps temp = {0}; bool res = false; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); @@ -1519,14 +1501,16 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) res = spl_calculate_scaler_params(spl_in, spl_out); // Convert respective out params from SPL to scaler data translate_SPL_out_params_to_pipe_ctx(pipe_ctx, spl_out); + + /* Ignore scaler failure if pipe context plane is phantom plane */ + if (!res && plane_state->is_phantom) + res = true; } else { #endif /* depends on h_active */ calculate_recout(pipe_ctx); /* depends on pixel format */ calculate_scaling_ratios(pipe_ctx); - /* depends on scaling ratios and recout, does not calculate offset yet */ - calculate_viewport_size(pipe_ctx); /* * LB calculations depend on vp size, h/v_active and scaling ratios */ @@ -1547,6 +1531,24 @@ bool resource_build_scaling_params(struct pipe_ctx
*pipe_ctx) pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha; + // get TAP value with 100x100 dummy data to qualify support for max scaling; + // overridden later if a different scaling quality is required + pipe_ctx->plane_res.scl_data.viewport.width = 100; + pipe_ctx->plane_res.scl_data.viewport.height = 100; + pipe_ctx->plane_res.scl_data.viewport_c.width = 100; + pipe_ctx->plane_res.scl_data.viewport_c.height = 100; + if (pipe_ctx->plane_res.xfm != NULL) + res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( + pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); + + if (pipe_ctx->plane_res.dpp != NULL) + res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( + pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); + + temp = pipe_ctx->plane_res.scl_data.taps; + + calculate_inits_and_viewports(pipe_ctx); + if (pipe_ctx->plane_res.xfm != NULL) res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); @@ -1573,11 +1575,14 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) &plane_state->scaling_quality); } - /* - * Depends on recout, scaling ratios, h_active and taps - * May need to re-check lb size after this in some obscure scenario - */ - if (res) + /* Ignore scaler failure if pipe context plane is phantom plane */ + if (!res && plane_state->is_phantom) + res = true; + + if (res && (pipe_ctx->plane_res.scl_data.taps.v_taps != temp.v_taps || + pipe_ctx->plane_res.scl_data.taps.h_taps != temp.h_taps || + pipe_ctx->plane_res.scl_data.taps.v_taps_c != temp.v_taps_c || + pipe_ctx->plane_res.scl_data.taps.h_taps_c != temp.h_taps_c)) calculate_inits_and_viewports(pipe_ctx); /* @@ -2089,7 +2094,8 @@ int resource_get_odm_slice_dst_width(struct pipe_ctx *otg_master, count = resource_get_odm_slice_count(otg_master); h_active = timing->h_addressable + timing->h_border_left + - timing->h_border_right; + timing->h_border_right + + otg_master->hblank_borrow; width = h_active / count; if (otg_master->stream_res.tg) @@ -4022,6 +4028,41 @@ fail: } /** + * decide_hblank_borrow - Decides the horizontal blanking borrow value for a given pipe context. + * @pipe_ctx: Pointer to the pipe context structure. + * + * This function calculates the horizontal blanking borrow value for a given pipe context based on the + * display stream compression (DSC) configuration. If the horizontal active pixels (hactive) are less + * than the total width of the DSC slices, it sets the hblank_borrow value to the difference. If the + * horizontal blanking remaining after the borrow (h_total minus hactive minus hblank_borrow) is less + * than 32 pixels, it resets the hblank_borrow value to 0.
+ */ +static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx) +{ + uint32_t hactive; + uint32_t ceil_slice_width; + struct dc_stream_state *stream = NULL; + + if (!pipe_ctx) + return; + + stream = pipe_ctx->stream; + + if (stream->timing.flags.DSC) { + hactive = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; + + /* Assume if determined slices does not divide Hactive evenly, Hborrow is needed for padding*/ + if (hactive % stream->timing.dsc_cfg.num_slices_h != 0) { + ceil_slice_width = (hactive / stream->timing.dsc_cfg.num_slices_h) + 1; + pipe_ctx->hblank_borrow = ceil_slice_width * stream->timing.dsc_cfg.num_slices_h - hactive; + + if (stream->timing.h_total - hactive - pipe_ctx->hblank_borrow < 32) + pipe_ctx->hblank_borrow = 0; + } + } +} + +/** * dc_validate_global_state() - Determine if hardware can support a given state * * @dc: dc struct for this driver @@ -4059,6 +4100,10 @@ enum dc_status dc_validate_global_state( if (pipe_ctx->stream != stream) continue; + /* Decide whether hblank borrow is needed and save it in pipe_ctx */ + if (dc->debug.enable_hblank_borrow) + decide_hblank_borrow(pipe_ctx); + if (dc->res_pool->funcs->patch_unknown_plane_state && pipe_ctx->plane_state && pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) { @@ -4094,14 +4139,6 @@ enum dc_status dc_validate_global_state( if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) result = DC_FAIL_BANDWIDTH_VALIDATE; - /* - * Only update link encoder to stream assignment after bandwidth validation passed. - * TODO: Split out assignment and validation. - */ - if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false) - dc->res_pool->funcs->link_encs_assign( - dc, new_ctx, new_ctx->streams, new_ctx->stream_count); - return result; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 2597e3fd562b..e006f816ff2f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -265,6 +265,9 @@ struct dc_state *dc_state_create_copy(struct dc_state *src_state) dc_state_copy_internal(new_state, src_state); #ifdef CONFIG_DRM_AMD_DC_FP + new_state->bw_ctx.dml2 = NULL; + new_state->bw_ctx.dml2_dc_power_source = NULL; + if (src_state->bw_ctx.dml2 && !dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) { dc_state_release(new_state); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 9a406d74c0dd..55dc482d9b36 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -292,7 +292,9 @@ bool dc_stream_set_cursor_attributes( * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz * 3. 
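A worked example of decide_hblank_borrow() as standalone arithmetic, with a made-up DSC timing: hactive = 1366 does not divide evenly into 4 slices, so slices are padded to 342 pixels and 2 pixels are borrowed from horizontal blanking:

#include <stdio.h>

int main(void)
{
    unsigned int hactive = 1366, h_total = 1500, num_slices_h = 4;
    unsigned int hblank_borrow = 0;

    if (hactive % num_slices_h != 0) {
        unsigned int ceil_slice_width = hactive / num_slices_h + 1;

        hblank_borrow = ceil_slice_width * num_slices_h - hactive;

        /* keep at least 32 pixels of real horizontal blanking */
        if (h_total - hactive - hblank_borrow < 32)
            hblank_borrow = 0;
    }

    printf("hblank_borrow = %u\n", hblank_borrow); /* 2 */
    return 0;
}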
If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz */ - if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384) { + if (dc->debug.allow_sw_cursor_fallback && + attributes->height * attributes->width * 4 > 16384 && + !stream->hw_cursor_req) { if (check_subvp_sw_cursor_fallback_req(dc, stream)) return false; } @@ -421,7 +423,6 @@ bool dc_stream_program_cursor_position( /* apply/update visual confirm */ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR) { /* update software state */ - uint32_t color_value = MAX_TG_COLOR_VALUE; int i; for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -429,15 +430,7 @@ bool dc_stream_program_cursor_position( /* adjust visual confirm color for all pipes with current stream */ if (stream == pipe_ctx->stream) { - if (stream->cursor_position.enable) { - pipe_ctx->visual_confirm_color.color_r_cr = color_value; - pipe_ctx->visual_confirm_color.color_g_y = 0; - pipe_ctx->visual_confirm_color.color_b_cb = 0; - } else { - pipe_ctx->visual_confirm_color.color_r_cr = 0; - pipe_ctx->visual_confirm_color.color_g_y = 0; - pipe_ctx->visual_confirm_color.color_b_cb = color_value; - } + get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); /* programming hardware */ if (pipe_ctx->plane_state) @@ -819,12 +812,12 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) stream->dst.height, stream->output_color_space); DC_LOG_DC( - "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n", + "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixel_encoding:%s, color_depth:%s\n", stream->timing.pix_clk_100hz / 10, stream->timing.h_total, stream->timing.v_total, - stream->timing.pixel_encoding, - stream->timing.display_color_depth); + dc_pixel_encoding_to_str(stream->timing.pixel_encoding), + dc_color_depth_to_str(stream->timing.display_color_depth)); DC_LOG_DC( "\tlink: %d\n", stream->link->link_index); diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3992ad73165b..e9b9126c0401 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -55,7 +55,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.301" +#define DC_VER "3.2.310" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -225,6 +225,11 @@ struct dc_dmub_caps { bool subvp_psr; bool gecc_enable; uint8_t fams_ver; + bool aux_backlight_support; +}; + +struct dc_scl_caps { + bool sharpener_support; }; struct dc_caps { @@ -285,6 +290,7 @@ struct dc_caps { uint16_t subvp_vertical_int_margin_us; bool seamless_odm; uint32_t max_v_total; + bool vtotal_limited_by_fp2; uint32_t max_disp_clock_khz_at_vmin; uint8_t subvp_drr_vblank_start_margin_us; bool cursor_not_scaled; @@ -292,6 +298,7 @@ struct dc_caps { bool sequential_ono; /* Conservative limit for DCC cases which require ODM4:1 to support*/ uint32_t dcc_plane_width_limit; + struct dc_scl_caps scl_caps; }; struct dc_bug_wa { @@ -463,6 +470,7 @@ struct dc_config { unsigned int enable_fpo_flicker_detection; bool disable_hbr_audio_dp2; bool consolidated_dpia_dp_lt; + bool set_pipe_unlock_order; }; enum visual_confirm { @@ -862,7 +870,6 @@ struct dc_debug_options { bool sanity_checks; bool max_disp_clk; bool surface_trace; - bool timing_trace; bool clock_trace; bool validation_trace; bool bandwidth_calcs_trace; @@ -1061,6 +1068,8 @@ struct dc_debug_options { unsigned int sharpen_policy; unsigned int 
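The 16384-byte threshold in the software-cursor fallback check above is exactly a 64x64 cursor at 4 bytes per pixel; with this change the fallback is only considered when allow_sw_cursor_fallback is set and the stream has not explicitly requested a hardware cursor. The size test restated standalone:

#include <stdbool.h>
#include <stdio.h>

static bool cursor_exceeds_threshold(unsigned int width, unsigned int height)
{
    return width * height * 4 > 16384;  /* 64 * 64 * 4 == 16384 */
}

int main(void)
{
    printf("64x64 -> %d\n", cursor_exceeds_threshold(64, 64));      /* 0 */
    printf("128x128 -> %d\n", cursor_exceeds_threshold(128, 128));  /* 1 */
    return 0;
}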
scale_to_sharpness_policy; bool skip_full_updated_if_possible; + unsigned int enable_oled_edp_power_up_opt; + bool enable_hblank_borrow; }; @@ -1253,7 +1262,6 @@ union surface_update_flags { uint32_t rotation_change:1; uint32_t swizzle_change:1; uint32_t scaling_change:1; - uint32_t clip_size_change: 1; uint32_t position_change:1; uint32_t in_transfer_func_change:1; uint32_t input_csc_change:1; @@ -1355,6 +1363,7 @@ struct dc_plane_state { enum mpcc_movable_cm_location mcm_location; struct dc_csc_transform cursor_csc_color_matrix; bool adaptive_sharpness_en; + int adaptive_sharpness_policy; int sharpness_level; enum linear_light_scaling linear_light_scaling; unsigned int sdr_white_level_nits; @@ -1461,6 +1470,7 @@ struct dc { struct dc_scratch_space current_state; struct dc_scratch_space new_state; struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack + bool pipes_to_unlock_first[MAX_PIPES]; /* Any of the pipes indicated here should be unlocked first */ } scratch; struct dml2_configuration_options dml2_options; @@ -1513,7 +1523,7 @@ struct dc_surface_update { * change cm2_params.component_settings: Full update * change cm2_params.cm2_luts: Fast update */ - struct dc_cm2_parameters *cm2_params; + const struct dc_cm2_parameters *cm2_params; const struct dc_csc_transform *cursor_csc_color_matrix; unsigned int sdr_white_level_nits; }; @@ -1770,7 +1780,6 @@ struct dc_link { bool dongle_mode_timing_override; bool blank_stream_on_ocs_change; bool read_dpcd204h_on_irq_hpd; - bool disable_assr_for_uhbr; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -1786,6 +1795,7 @@ struct dc_link { // BW ALLOCATON USB4 ONLY struct dc_dpia_bw_alloc dpia_bw_alloc_config; bool skip_implict_edp_power_control; + enum backlight_control_type backlight_control_type; }; /* Return an enumerated dc_link. @@ -2203,8 +2213,7 @@ void dc_link_edp_panel_backlight_power_on(struct dc_link *link, * and 16 bit fractional, where 1.0 is max backlight value. */ bool dc_link_set_backlight_level(const struct dc_link *dc_link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *backlight_level_params); /* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). 
*/ bool dc_link_set_backlight_level_nits(struct dc_link *link, @@ -2543,6 +2552,8 @@ struct dc_power_profile { struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context); +unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context); + /* DSC Interfaces */ #include "dc_dsc.h" diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 1e7de0f03290..f90fc154549a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -519,7 +519,8 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi union dmub_rb_cmd cmd = { 0 }; unsigned int panel_inst = 0; - if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst)) + if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst) && + dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE) return; memset(&cmd, 0, sizeof(cmd)); @@ -1012,7 +1013,6 @@ static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) r2 = test_pipe->plane_res.scl_data.recout; r2_r = r2.x + r2.width; r2_b = r2.y + r2.height; - split_pipe = test_pipe; /** * There is another half plane on same layer because of @@ -1294,6 +1294,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) memset(&new_signals, 0, sizeof(new_signals)); + new_signals.bits.allow_idle = 1; /* always set */ + if (dc->config.disable_ips == DMUB_IPS_ENABLE || dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) { new_signals.bits.allow_pg = 1; @@ -1389,7 +1391,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) */ dc_dmub_srv->needs_idle_wake = false; - if (prev_driver_signals.bits.allow_ips2 && + if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) && (!dc->debug.optimize_ips_handshake || ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) { DC_LOG_IPS( @@ -1450,7 +1452,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) } dc_dmub_srv_notify_idle(dc, false); - if (prev_driver_signals.bits.allow_ips1) { + if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) { DC_LOG_IPS( "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, @@ -1862,3 +1864,81 @@ void dc_dmub_srv_fams2_passthrough_flip( dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT); } } + +bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement) +{ + bool result; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY, + start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT); + + return result; +} + +void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output) +{ + uint32_t i; + enum dmub_gpint_command command_code; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return; + + switch (output->ips_mode) { + case DMUB_IPS_MODE_IPS1_MAX: + command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER; + break; + case DMUB_IPS_MODE_IPS2: + command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER; + break; + case DMUB_IPS_MODE_IPS1_RCG: + command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER; + break; + case DMUB_IPS_MODE_IPS1_ONO2_ON: + command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER; + break; + default: + command_code = DMUB_GPINT__INVALID_COMMAND; + break; + } + + if (command_code == 
DMUB_GPINT__INVALID_COMMAND) + return; + + // send gpint commands and wait for ack + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT, + (uint16_t)(output->ips_mode), + &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->residency_percent = 0; + + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER, + (uint16_t)(output->ips_mode), + &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->entry_counter = 0; + + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO, + (uint16_t)(output->ips_mode), + &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->total_active_time_us[0] = 0; + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI, + (uint16_t)(output->ips_mode), + &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->total_active_time_us[1] = 0; + + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO, + (uint16_t)(output->ips_mode), + &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->total_inactive_time_us[0] = 0; + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI, + (uint16_t)(output->ips_mode), + &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->total_inactive_time_us[1] = 0; + + // NUM_IPS_HISTOGRAM_BUCKETS = 16 + for (i = 0; i < 16; i++) + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i], + DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->histogram[i] = 0; +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 42f0cb672d8b..10b48198b7a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -209,4 +209,43 @@ void dc_dmub_srv_fams2_passthrough_flip( struct dc_stream_state *stream, struct dc_surface_update *srf_updates, int surface_count); + +/** + * struct ips_residency_info - struct containing info from dmub_ips_residency_stats + * + * @ips_mode: The IPS mode that the following stats pertain to + * @residency_percent: The percentage of time spent in given IPS mode in millipercent + * @entry_counter: The number of entries made into this IPS state + * @total_active_time_us: uint32_t array of length 2 representing time in the given IPS mode + * in microseconds. Index 0 is lower 32 bits, index 1 is upper 32 bits. + * @total_inactive_time_us: uint32_t array of length 2 representing time outside the given IPS mode + * in microseconds. Index 0 is lower 32 bits, index 1 is upper 32 bits.
+ * @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c + */ +struct ips_residency_info { + enum dmub_ips_mode ips_mode; + unsigned int residency_percent; + unsigned int entry_counter; + unsigned int total_active_time_us[2]; + unsigned int total_inactive_time_us[2]; + unsigned int histogram[16]; +}; + +/** + * dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status + * + * @dc_dmub_srv: The DC DMUB service pointer + * @start_measurement: Describes whether to start or stop measurement + * + * Return: true if GPINT was sent successfully, false otherwise + */ +bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement); + +/** + * dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info + * + * @dc_dmub_srv: The DC DMUB service pointer + * @output: Output struct to copy the residency info to + */ +void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output); #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 41bd95e9177a..8dd6eb044829 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -1166,6 +1166,7 @@ struct dpcd_caps { int8_t branch_dev_name[6]; int8_t branch_hw_revision; int8_t branch_fw_revision[2]; + int8_t branch_vendor_specific_data[4]; bool allow_invalid_MSA_timing_param; bool panel_mode_edp; @@ -1191,6 +1192,7 @@ struct dpcd_caps { struct edp_psr_info psr_info; struct replay_info pr_info; + uint16_t edp_oled_emission_rate; }; union dpcd_sink_ext_caps { @@ -1204,7 +1206,7 @@ union dpcd_sink_ext_caps { uint8_t oled : 1; uint8_t reserved_2 : 1; uint8_t miniled : 1; - uint8_t reserved : 1; + uint8_t emission_output : 1; } bits; uint8_t raw; }; @@ -1358,6 +1360,9 @@ struct dp_trace { #ifndef DP_TUNNELING_IRQ #define DP_TUNNELING_IRQ (1 << 5) #endif +#ifndef DP_BRANCH_VENDOR_SPECIFIC_START +#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C +#endif /** USB4 DPCD BW Allocation Registers Chapter 10.7 **/ #ifndef DP_TUNNELING_CAPABILITIES #define DP_TUNNELING_CAPABILITIES 0xE000D /* 1.4a */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h index 44afcd989224..bd37ec82b42d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_plane.h +++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h @@ -26,7 +26,6 @@ #ifndef _DC_PLANE_H_ #define _DC_PLANE_H_ -#include "dc.h" #include "dc_hw_types.h" struct dc_plane_state *dc_create_plane_state(const struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c index 603552dbd771..0e310fd48b5c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c @@ -8,13 +8,13 @@ #include "dcn32/dcn32_dpp.h" #include "dcn401/dcn401_dpp.h" -static struct spl_funcs dcn2_spl_funcs = { +static struct spl_callbacks dcn2_spl_callbacks = { .spl_calc_lb_num_partitions = dscl2_spl_calc_lb_num_partitions, }; -static struct spl_funcs dcn32_spl_funcs = { +static struct spl_callbacks dcn32_spl_callbacks = { .spl_calc_lb_num_partitions = dscl32_spl_calc_lb_num_partitions, }; -static struct spl_funcs dcn401_spl_funcs = { +static struct spl_callbacks dcn401_spl_callbacks = { .spl_calc_lb_num_partitions = dscl401_spl_calc_lb_num_partitions, }; static void populate_splrect_from_rect(struct spl_rect *spl_rect, const
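Two small consumers of the struct documented above, since residency_percent is in millipercent and the duration counters are split across two 32-bit GPINT replies (low word at index 0, high word at index 1); standalone, with made-up values:

#include <stdint.h>
#include <stdio.h>

static uint64_t combine_us(const uint32_t us[2])
{
    return ((uint64_t)us[1] << 32) | us[0]; /* [0] = low, [1] = high */
}

int main(void)
{
    unsigned int residency_millipercent = 98765;    /* means 98.765% */
    uint32_t total_active_time_us[2] = { 0x89abcdefu, 0x1u };

    printf("IPS residency: %u.%03u%%\n",
           residency_millipercent / 1000, residency_millipercent % 1000);
    printf("active: %llu us\n",
           (unsigned long long)combine_us(total_active_time_us));
    return 0;
}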
struct rect *rect) @@ -38,6 +38,7 @@ static void populate_spltaps_from_taps(struct spl_taps *spl_scaling_quality, spl_scaling_quality->h_taps = scaling_quality->h_taps; spl_scaling_quality->v_taps_c = scaling_quality->v_taps_c; spl_scaling_quality->v_taps = scaling_quality->v_taps; + spl_scaling_quality->integer_scaling = scaling_quality->integer_scaling; } static void populate_taps_from_spltaps(struct scaling_taps *scaling_quality, const struct spl_taps *spl_scaling_quality) @@ -76,16 +77,16 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl // This is used to determine the vtap support switch (plane_state->ctx->dce_version) { case DCN_VERSION_2_0: - spl_in->funcs = &dcn2_spl_funcs; + spl_in->callbacks = dcn2_spl_callbacks; break; case DCN_VERSION_3_2: - spl_in->funcs = &dcn32_spl_funcs; + spl_in->callbacks = dcn32_spl_callbacks; break; case DCN_VERSION_4_01: - spl_in->funcs = &dcn401_spl_funcs; + spl_in->callbacks = dcn401_spl_callbacks; break; default: - spl_in->funcs = &dcn2_spl_funcs; + spl_in->callbacks = dcn2_spl_callbacks; } // Make format field from spl_in point to plane_res scl_data format spl_in->basic_in.format = (enum spl_pixel_format)pipe_ctx->plane_res.scl_data.format; @@ -119,7 +120,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx); // Make spl input basic out info output_size width point to stream h active spl_in->basic_out.output_size.width = - stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; + stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->hblank_borrow; // Make spl input basic out info output_size height point to v active spl_in->basic_out.output_size.height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; @@ -187,14 +188,14 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->h_active = pipe_ctx->plane_res.scl_data.h_active; spl_in->v_active = pipe_ctx->plane_res.scl_data.v_active; - spl_in->debug.sharpen_policy = (enum sharpen_policy)pipe_ctx->stream->ctx->dc->debug.sharpen_policy; + spl_in->sharpen_policy = (enum sharpen_policy)plane_state->adaptive_sharpness_policy; spl_in->debug.scale_to_sharpness_policy = (enum scale_to_sharpness_policy)pipe_ctx->stream->ctx->dc->debug.scale_to_sharpness_policy; /* Check if the stream is in fullscreen and if it's HDR.
* Use this to determine sharpness levels */ - spl_in->is_fullscreen = dm_helpers_is_fullscreen(pipe_ctx->stream->ctx, pipe_ctx->stream); + spl_in->is_fullscreen = pipe_ctx->stream->sharpening_required; spl_in->is_hdr_on = dm_helpers_is_hdr_on(pipe_ctx->stream->ctx, pipe_ctx->stream); spl_in->sdr_white_level_nits = plane_state->sdr_white_level_nits; } diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h index caa45db50232..db1e63a7d460 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_state.h +++ b/drivers/gpu/drm/amd/display/dc/dc_state.h @@ -26,7 +26,6 @@ #ifndef _DC_STATE_H_ #define _DC_STATE_H_ -#include "dc.h" #include "inc/core_status.h" struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params); diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 14ea47eda0c8..413970588a26 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -143,6 +143,7 @@ union stream_update_flags { uint32_t crtc_timing_adjust : 1; uint32_t fams_changed : 1; uint32_t scaler_sharpener : 1; + uint32_t sharpening_required : 1; } bits; uint32_t raw; @@ -310,6 +311,7 @@ struct dc_stream_state { struct luminance_data lumin_data; bool scaler_sharpener_update; + bool sharpening_required; }; #define ABM_LEVEL_IMMEDIATE_DISABLE 255 @@ -356,6 +358,7 @@ struct dc_stream_update { struct dc_cursor_position *cursor_position; bool *hw_cursor_req; bool *scaler_sharpener_update; + bool *sharpening_required; }; bool dc_is_stream_unchanged( diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 6d7989b751e2..edf4df1d03b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -179,6 +179,9 @@ struct dc_panel_patch { unsigned int mst_start_top_delay; unsigned int remove_sink_ext_caps; unsigned int disable_colorimetry; + uint8_t blankstream_before_otg_off; + bool oled_optimize_display_on; + unsigned int force_mst_blocked_discovery; }; struct dc_edid_caps { @@ -922,6 +925,12 @@ struct display_endpoint_id { enum display_endpoint_type ep_type; }; +enum backlight_control_type { + BACKLIGHT_CONTROL_PWM = 0, + BACKLIGHT_CONTROL_VESA_AUX = 1, + BACKLIGHT_CONTROL_AMD_AUX = 2, +}; + #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) struct otg_phy_mux { uint8_t phy_output_num; @@ -1295,4 +1304,31 @@ struct dc_commit_streams_params { enum dc_power_source_type power_source; }; +struct set_backlight_level_params { + /* backlight in pwm */ + uint32_t backlight_pwm_u16_16; + /* brightness ramping */ + uint32_t frame_ramp; + /* backlight control type + * 0: PWM backlight control + * 1: VESA AUX backlight control + * 2: AMD AUX backlight control + */ + enum backlight_control_type control_type; + /* backlight in millinits */ + uint32_t backlight_millinits; + /* transition time in ms */ + uint32_t transition_time_in_ms; + /* minimum luminance in nits */ + uint32_t min_luminance; + /* maximum luminance in nits */ + uint32_t max_luminance; + /* minimum backlight in pwm */ + uint32_t min_backlight_pwm; + /* maximum backlight in pwm */ + uint32_t max_backlight_pwm; + /* AUX HW instance */ + uint8_t aux_inst; +}; + #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c index 838d72eaa87f..b363f5360818 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c +++ 
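A hypothetical caller filling the new set_backlight_level_params for a plain PWM set; the values are made up, only the three fields relevant to PWM control are populated, and a valid struct dc_link *link from the surrounding driver is assumed:

struct set_backlight_level_params params = {
    .backlight_pwm_u16_16 = 0x8000, /* 0.5 in unsigned 16.16 fixed point */
    .frame_ramp = 0,                /* apply immediately, no ramping */
    .control_type = BACKLIGHT_CONTROL_PWM,
};

dc_link_set_backlight_level(link, &params);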
b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c @@ -1392,10 +1392,10 @@ static void dccg35_set_dtbclk_dto( /* The recommended programming sequence to enable DTBCLK DTO to generate * valid pixel HPO DPSTREAM ENCODER, specifies that DTO source select should - * be set only after DTO is enabled + * be set only after DTO is enabled. + * PIPEx_DTO_SRC_SEL should not be programmed during DTBCLK update since OTG may still be on, and the + * programming is handled in program_pix_clk() regardless, so it can be removed from here. */ - REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], - PIPE_DTO_SRC_SEL[params->otg_inst], 2); } else { switch (params->otg_inst) { case 0: @@ -1412,9 +1412,12 @@ static void dccg35_set_dtbclk_dto( break; } - REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst], - DTBCLK_DTO_ENABLE[params->otg_inst], 0, - PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 0 : 1); + /** + * PIPEx_DTO_SRC_SEL should not be programmed during DTBCLK update since OTG may still be on, and the + * programming is handled in program_pix_clk() regardless, so it can be removed from here. + */ + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_ENABLE[params->otg_inst], 0); REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0); REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0); diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c index 0b889004509a..d3e46c3cfa57 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c @@ -580,9 +580,6 @@ static void dccg401_set_dpstreamclk( int otg_inst, int dp_hpo_inst) { - /* set the dtbclk_p source */ - dccg401_set_dtbclk_p_src(dccg, src, otg_inst); - /* enabled to select one of the DTBCLKs for pipe */ if (src == REFCLK) dccg401_disable_dpstreamclk(dccg, dp_hpo_inst); @@ -805,33 +802,6 @@ static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - switch (link_enc_inst) { - case 0: - REG_UPDATE(SYMCLKA_CLOCK_ENABLE, - SYMCLKA_CLOCK_ENABLE, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 1); - break; - case 1: - REG_UPDATE(SYMCLKB_CLOCK_ENABLE, - SYMCLKB_CLOCK_ENABLE, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 1); - break; - case 2: - REG_UPDATE(SYMCLKC_CLOCK_ENABLE, - SYMCLKC_CLOCK_ENABLE, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 1); - break; - case 3: - REG_UPDATE(SYMCLKD_CLOCK_ENABLE, - SYMCLKD_CLOCK_ENABLE, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 1); - break; - } - switch (stream_enc_inst) { case 0: REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE, @@ -864,37 +834,8 @@ static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst } } -/*get other front end connected to this backend*/ -static uint8_t dccg401_get_number_enabled_symclk_fe_connected_to_be(struct dccg *dccg, uint32_t link_enc_inst) -{ - uint8_t num_enabled_symclk_fe = 0; - uint32_t fe_clk_en[4] = {0}, be_clk_sel[4] = {0}; - struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - uint8_t i; - - REG_GET_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, &fe_clk_en[0], - SYMCLKA_FE_SRC_SEL, &be_clk_sel[0]); 
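The ordering constraint both comments above describe - the DTO must be enabled before it is selected as the pixel-rate source, which is why the source select now lives in program_pix_clk() - can be captured as a tiny state check; a standalone sketch, not the real register interface:

#include <assert.h>
#include <stdbool.h>

struct dto_state {
    bool dto_enabled;
    int pipe_dto_src_sel;   /* per the removed code: 2 selects the DTBCLK DTO,
                             * 0/1 select the non-DTO sources */
};

/* selecting the DTO as source is only legal once the DTO is running */
static void select_dto_source(struct dto_state *s)
{
    assert(s->dto_enabled);
    s->pipe_dto_src_sel = 2;
}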
- - REG_GET_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, &fe_clk_en[1], - SYMCLKB_FE_SRC_SEL, &be_clk_sel[1]); - - REG_GET_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, &fe_clk_en[2], - SYMCLKC_FE_SRC_SEL, &be_clk_sel[2]); - - REG_GET_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, &fe_clk_en[3], - SYMCLKD_FE_SRC_SEL, &be_clk_sel[3]); - - for (i = 0; i < ARRAY_SIZE(fe_clk_en); i++) { - if (fe_clk_en[i] && be_clk_sel[i] == link_enc_inst) - num_enabled_symclk_fe++; - } - - return num_enabled_symclk_fe; -} - static void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) { - uint8_t num_enabled_symclk_fe = 0; struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); switch (stream_enc_inst) { @@ -919,31 +860,6 @@ static void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_ins SYMCLKD_FE_SRC_SEL, 0); break; } - - /*check other enabled symclk fe connected to this be */ - num_enabled_symclk_fe = dccg401_get_number_enabled_symclk_fe_connected_to_be(dccg, link_enc_inst); - /*only turn off backend clk if other front ends attached to this backend are all off, - for mst, only turn off the backend if this is the last front end*/ - if (num_enabled_symclk_fe == 0) { - switch (link_enc_inst) { - case 0: - REG_UPDATE(SYMCLKA_CLOCK_ENABLE, - SYMCLKA_CLOCK_ENABLE, 0); - break; - case 1: - REG_UPDATE(SYMCLKB_CLOCK_ENABLE, - SYMCLKB_CLOCK_ENABLE, 0); - break; - case 2: - REG_UPDATE(SYMCLKC_CLOCK_ENABLE, - SYMCLKC_CLOCK_ENABLE, 0); - break; - case 3: - REG_UPDATE(SYMCLKD_CLOCK_ENABLE, - SYMCLKD_CLOCK_ENABLE, 0); - break; - } - } } static const struct dccg_funcs dccg401_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 5c2825bc9a87..d199e4ed2e59 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -277,7 +277,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute( uint32_t misc1 = 0; uint32_t h_blank; uint32_t h_back_porch; - uint8_t synchronous_clock = 0; /* asynchronous mode */ uint8_t colorimetry_bpc; uint8_t dynamic_range_rgb = 0; /*full range*/ uint8_t dynamic_range_ycbcr = 1; /*bt709*/ @@ -380,7 +379,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute( break; } - misc0 = misc0 | synchronous_clock; misc0 = colorimetry_bpc << 5; if (REG(DP_MSA_TIMING_PARAM1)) { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c index db7557a1c613..8a3fbf95c48f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c @@ -76,7 +76,6 @@ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C_MAS mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C, value); - temp = 0; value = 0; temp = address.low_part >> UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C__GRPH_PRIMARY_SURFACE_ADDRESS_C__SHIFT; @@ -112,7 +111,6 @@ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L_MAS mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L, value); - temp = 0; value = 0; temp = address.low_part >> UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L__GRPH_PRIMARY_SURFACE_ADDRESS_L__SHIFT; diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c index 8db9f7514466..889f314cac65 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c @@ -717,7 
+717,7 @@ static struct link_encoder *dce60_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index eaed5d1c398a..dcd2cdfe91eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -365,23 +365,18 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx, region_start = -MAX_LOW_POINT; region_end = NUMBER_REGIONS - MAX_LOW_POINT; } else { - /* 11 segments - * segment is from 2^-10 to 2^1 + /* 13 segments + * segment is from 2^-12 to 2^0 * There are less than 256 points, for optimization */ - seg_distr[0] = 3; - seg_distr[1] = 4; - seg_distr[2] = 4; - seg_distr[3] = 4; - seg_distr[4] = 4; - seg_distr[5] = 4; - seg_distr[6] = 4; - seg_distr[7] = 4; - seg_distr[8] = 4; - seg_distr[9] = 4; - seg_distr[10] = 1; - - region_start = -10; + const uint8_t SEG_COUNT = 12; + + for (i = 0; i < SEG_COUNT; i++) + seg_distr[i] = 4; + + seg_distr[SEG_COUNT] = 1; + + region_start = -SEG_COUNT; region_end = 1; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index 05df502a54f2..88cf47a5ea75 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -46,7 +46,7 @@ #include "clk_mgr.h" __printf(3, 4) -unsigned int snprintf_count(char *pbuf, unsigned int bufsize, char *fmt, ...) +unsigned int snprintf_count(char *pbuf, unsigned int bufsize, const char *fmt, ...) 
{ int ret_vsnprintf; unsigned int chars_printed; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c index f31f0e3abfc0..0690c346f2c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c @@ -140,23 +140,18 @@ bool cm3_helper_translate_curve_to_hw_format( region_start = -MAX_LOW_POINT; region_end = NUMBER_REGIONS - MAX_LOW_POINT; } else { - /* 11 segments - * segment is from 2^-10 to 2^0 + /* 13 segments + * segment is from 2^-12 to 2^0 * There are less than 256 points, for optimization */ - seg_distr[0] = 3; - seg_distr[1] = 4; - seg_distr[2] = 4; - seg_distr[3] = 4; - seg_distr[4] = 4; - seg_distr[5] = 4; - seg_distr[6] = 4; - seg_distr[7] = 4; - seg_distr[8] = 4; - seg_distr[9] = 4; - seg_distr[10] = 1; - - region_start = -10; + const uint8_t SEG_COUNT = 12; + + for (i = 0; i < SEG_COUNT; i++) + seg_distr[i] = 4; + + seg_distr[SEG_COUNT] = 1; + + region_start = -SEG_COUNT; region_end = 1; } @@ -285,157 +280,6 @@ bool cm3_helper_translate_curve_to_hw_format( return true; } -#define NUM_DEGAMMA_REGIONS 12 - - -bool cm3_helper_translate_curve_to_degamma_hw_format( - const struct dc_transfer_func *output_tf, - struct pwl_params *lut_params) -{ - struct curve_points3 *corner_points; - struct pwl_result_data *rgb_resulted; - struct pwl_result_data *rgb; - struct pwl_result_data *rgb_plus_1; - - int32_t region_start, region_end; - int32_t i; - uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points; - - if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS) - return false; - - corner_points = lut_params->corner_points; - rgb_resulted = lut_params->rgb_resulted; - hw_points = 0; - - memset(lut_params, 0, sizeof(struct pwl_params)); - memset(seg_distr, 0, sizeof(seg_distr)); - - region_start = -NUM_DEGAMMA_REGIONS; - region_end = 0; - - - for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++) - seg_distr[i] = -1; - /* 12 segments - * segments are from 2^-12 to 0 - */ - for (i = 0; i < NUM_DEGAMMA_REGIONS ; i++) - seg_distr[i] = 4; - - for (k = 0; k < MAX_REGIONS_NUMBER; k++) { - if (seg_distr[k] != -1) - hw_points += (1 << seg_distr[k]); - } - - j = 0; - for (k = 0; k < (region_end - region_start); k++) { - increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]); - start_index = (region_start + k + MAX_LOW_POINT) * - NUMBER_SW_SEGMENTS; - for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS; - i += increment) { - if (j == hw_points - 1) - break; - if (i >= TRANSFER_FUNC_POINTS) - return false; - rgb_resulted[j].red = output_tf->tf_pts.red[i]; - rgb_resulted[j].green = output_tf->tf_pts.green[i]; - rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; - j++; - } - } - - /* last point */ - start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS; - rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index]; - rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; - rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; - - corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), - dc_fixpt_from_int(region_start)); - corner_points[0].green.x = corner_points[0].red.x; - corner_points[0].blue.x = corner_points[0].red.x; - corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), - dc_fixpt_from_int(region_end)); - corner_points[1].green.x = corner_points[1].red.x; - corner_points[1].blue.x = corner_points[1].red.x; - - 
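A quick standalone check of the arithmetic behind the new 13-segment distribution above: twelve regions with 2^4 points each plus one region with 2^1 points gives 194 hardware points, comfortably under the 256-point budget the comment refers to:

#include <stdio.h>

int main(void)
{
    const unsigned int SEG_COUNT = 12;
    unsigned int seg_distr[13];
    unsigned int i, hw_points = 0;

    for (i = 0; i < SEG_COUNT; i++)
        seg_distr[i] = 4;
    seg_distr[SEG_COUNT] = 1;

    for (i = 0; i <= SEG_COUNT; i++)
        hw_points += 1u << seg_distr[i];

    printf("hw_points = %u\n", hw_points); /* 194 */
    return 0;
}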
corner_points[0].red.y = rgb_resulted[0].red; - corner_points[0].green.y = rgb_resulted[0].green; - corner_points[0].blue.y = rgb_resulted[0].blue; - - /* see comment above, m_arrPoints[1].y should be the Y value for the - * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1) - */ - corner_points[1].red.y = rgb_resulted[hw_points - 1].red; - corner_points[1].green.y = rgb_resulted[hw_points - 1].green; - corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue; - corner_points[1].red.slope = dc_fixpt_zero; - corner_points[1].green.slope = dc_fixpt_zero; - corner_points[1].blue.slope = dc_fixpt_zero; - - if (output_tf->tf == TRANSFER_FUNCTION_PQ) { - /* for PQ, we want to have a straight line from last HW X point, - * and the slope to be such that we hit 1.0 at 10000 nits. - */ - const struct fixed31_32 end_value = - dc_fixpt_from_int(125); - - corner_points[1].red.slope = dc_fixpt_div( - dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y), - dc_fixpt_sub(end_value, corner_points[1].red.x)); - corner_points[1].green.slope = dc_fixpt_div( - dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y), - dc_fixpt_sub(end_value, corner_points[1].green.x)); - corner_points[1].blue.slope = dc_fixpt_div( - dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y), - dc_fixpt_sub(end_value, corner_points[1].blue.x)); - } - - lut_params->hw_points_num = hw_points; - - k = 0; - for (i = 1; i < MAX_REGIONS_NUMBER; i++) { - if (seg_distr[k] != -1) { - lut_params->arr_curve_points[k].segments_num = - seg_distr[k]; - lut_params->arr_curve_points[i].offset = - lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]); - } - k++; - } - - if (seg_distr[k] != -1) - lut_params->arr_curve_points[k].segments_num = seg_distr[k]; - - rgb = rgb_resulted; - rgb_plus_1 = rgb_resulted + 1; - - i = 1; - while (i != hw_points + 1) { - if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) - rgb_plus_1->red = rgb->red; - if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) - rgb_plus_1->green = rgb->green; - if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) - rgb_plus_1->blue = rgb->blue; - - rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); - rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); - rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); - - ++rgb_plus_1; - ++rgb; - ++i; - } - cm3_helper_convert_to_custom_float(rgb_resulted, - lut_params->corner_points, - hw_points, false); - - return true; -} - bool cm3_helper_convert_to_custom_float( struct pwl_result_data *rgb_resulted, struct curve_points3 *corner_points, diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c index f496e952ceec..d01a8b8f9595 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c @@ -255,7 +255,6 @@ void enc1_stream_encoder_dp_set_stream_attribute( uint32_t misc1 = 0; uint32_t h_blank; uint32_t h_back_porch; - uint8_t synchronous_clock = 0; /* asynchronous mode */ uint8_t colorimetry_bpc; uint8_t dp_pixel_encoding = 0; uint8_t dp_component_depth = 0; @@ -362,7 +361,6 @@ void enc1_stream_encoder_dp_set_stream_attribute( break; } - misc0 = misc0 | synchronous_clock; misc0 = colorimetry_bpc << 5; switch (output_color_space) { diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c index 5b343f745cf3..ae81451a3a72 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c @@ -83,6 +83,15 @@ void enc314_disable_fifo(struct stream_encoder *enc) REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0); } +static bool enc314_is_fifo_enabled(struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t reset_val; + + REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val); + return (reset_val != 0); +} + void enc314_dp_set_odm_combine( struct stream_encoder *enc, bool odm_combine) @@ -468,6 +477,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = { .enable_fifo = enc314_enable_fifo, .disable_fifo = enc314_disable_fifo, + .is_fifo_enabled = enc314_is_fifo_enabled, .set_input_mode = enc314_set_dig_input_mode, }; diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c index 0a27e0942a12..098c2a01a850 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c @@ -447,7 +447,6 @@ void enc401_stream_encoder_dp_set_stream_attribute( uint32_t misc1 = 0; uint32_t h_blank; uint32_t h_back_porch; - uint8_t synchronous_clock = 0; /* asynchronous mode */ uint8_t colorimetry_bpc; uint8_t dp_pixel_encoding = 0; uint8_t dp_component_depth = 0; @@ -603,7 +602,6 @@ void enc401_stream_encoder_dp_set_stream_attribute( break; } - misc0 = misc0 | synchronous_clock; misc0 = colorimetry_bpc << 5; switch (output_color_space) { diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 9405c47ee2a9..f81e5a4e1d6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -143,7 +143,7 @@ void generic_reg_wait(const struct dc_context *ctx, unsigned int delay_between_poll_us, unsigned int time_out_num_tries, const char *func_name, int line); -unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...); +unsigned int snprintf_count(char *pBuf, unsigned int bufSize, const char *fmt, ...); /* These macros need to be used with soc15 registers in order to retrieve * the actual offset. 
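The dm_services.h hunk above constifies the format string of snprintf_count(), so callers can pass string literals without discarding qualifiers and static checkers can treat the parameter as read-only. A minimal sketch of the hardened prototype follows; note that the __printf() annotation (from the kernel's compiler attribute headers) is shown only for illustration — this patch adds the const qualifier and nothing else:

	/* printf-style prototype with a read-only format string; the
	 * __printf(3, 4) attribute (assumed here, not part of the patch)
	 * would additionally let the compiler type-check the variadic
	 * arguments against fmt. */
	__printf(3, 4)
	unsigned int snprintf_count(char *pBuf, unsigned int bufSize,
				    const char *fmt, ...);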
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 565f3c492477..0c8c4a080c50 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -785,12 +785,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBW = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 9d6675ecc5f1..c935903b68e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -845,12 +845,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBW = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 4fce64a030b6..390c1a77fda6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -443,8 +443,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -491,8 +489,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. // calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index 3fa9a5da02f6..843d6004258c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -443,8 +443,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -491,8 +489,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. 
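These DML hunks all remove the same kind of dead store: a local is zeroed and then unconditionally overwritten before its first read, or — as with TimeForFetchingMetaPTE, TimeForFetchingRowInVBlank, and LinesToRequestPrefetchPixelData in the CalculatePrefetchSchedule() error paths — cleared and never read again. Only the *output parameters on those error paths need clearing, since the caller observes them. The same pair of redundant zero-initializations is dropped again in the dcn21, dcn30, dcn31, and dcn314 copies of get_meta_and_pte_attr() below. A simplified stand-in for the pattern (the arithmetic here is illustrative, not the real DML formula):

	/* Before the patch: the "= 0" is never observable, because the
	 * variable is always reassigned before any read. The patch simply
	 * deletes the dead initializer. */
	static unsigned int blk_width_log2(unsigned int log2_blk_bytes,
					   unsigned int log2_blk_height)
	{
		unsigned int log2_blk_width = 0;	/* dead store, removed */

		log2_blk_width = log2_blk_bytes - log2_blk_height;
		return log2_blk_width;
	}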
// calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index eb3ed965e48b..cd8cca651419 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -1049,12 +1049,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index 9e1c18b90805..5718000627b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -435,8 +435,6 @@ static void get_meta_and_pte_attr( blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -485,8 +483,6 @@ static void get_meta_and_pte_attr( - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. // calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 1c10ba4dcdde..cee1b351e105 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -1280,12 +1280,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; @@ -1775,15 +1772,6 @@ static unsigned int CalculateVMAndRowBytes( *PixelPTEReqWidth = 32768.0 / BytePerPixel; *PTERequestSize = 64; FractionOfPTEReturnDrop = 0; - } else if (MacroTileSizeBytes == 4096) { - PixelPTEReqHeightPTEs = 1; - *PixelPTEReqHeight = MacroTileHeight; - *PixelPTEReqWidth = 8 * *MacroTileWidth; - *PTERequestSize = 64; - if (ScanDirection != dm_vert) - FractionOfPTEReturnDrop = 0; - else - FractionOfPTEReturnDrop = 7.0 / 8; } else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) { PixelPTEReqHeightPTEs = 16; *PixelPTEReqHeight = 16 * BlockHeight256Bytes; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c index b28fcc8608ff..76d3bb3c9155 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c @@ -392,8 +392,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, blk_bytes = surf_linear ? 
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double)blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -464,8 +462,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. // calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 2b275e680379..f567a9023682 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -1444,12 +1444,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c index b57b095cd4a8..c46bda2141ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c @@ -413,8 +413,6 @@ static void get_meta_and_pte_attr( log2_blk256_height = dml_log2((double) blk256_height); blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -481,8 +479,6 @@ static void get_meta_and_pte_attr( log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. 
// calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index debfa31583a6..5865e8fa2d8e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -1461,12 +1461,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c index 61b3bebf24c9..b7d2a0caec11 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c @@ -501,8 +501,6 @@ static void get_meta_and_pte_attr( log2_blk256_height = dml_log2((double) blk256_height); blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -569,8 +567,6 @@ static void get_meta_and_pte_attr( log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. 
// calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index d92fb428ee96..86ac7d59fd32 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -4097,12 +4097,9 @@ bool dml32_CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index a201dbb743d7..d9e63c4fdd95 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -204,8 +204,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = { .num_states = 8, .sr_exit_time_us = 28.0, .sr_enter_plus_exit_time_us = 30.0, - .sr_exit_z8_time_us = 250.0, - .sr_enter_plus_exit_z8_time_us = 350.0, + .sr_exit_z8_time_us = 263.0, + .sr_enter_plus_exit_z8_time_us = 363.0, .fclk_change_latency_us = 24.0, .usr_retraining_latency_us = 2, .writeback_latency_us = 12.0, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c index d8bfc85e5dcd..88dc2b97e7bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c @@ -559,12 +559,11 @@ static void get_surf_rq_param( const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param, bool is_chroma) { - bool mode_422 = 0; unsigned int vp_width = 0; unsigned int vp_height = 0; unsigned int data_pitch = 0; unsigned int meta_pitch = 0; - unsigned int ppe = mode_422 ? 
2 : 1; + unsigned int ppe = 1; bool surf_linear; bool surf_vert; unsigned int bytes_per_element; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index d851c081e376..8dabb1ac0b68 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -1222,6 +1222,7 @@ static dml_bool_t CalculatePrefetchSchedule(struct display_mode_lib_scratch_st * s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto; s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + dml_max(p->TWait + p->TCalc, *p->Tdmdl)) / s->LineTime - (*p->DSTYAfterScaler + (dml_float_t) *p->DSTXAfterScaler / (dml_float_t)p->myPipe->HTotal); + s->dst_y_prefetch_equ = dml_min(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH #ifdef __DML_VBA_DEBUG__ dml_print("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index 8697eac1e1f7..c6a5a8614679 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -339,11 +339,22 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in // } } +static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream) +{ + unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total; + + if (stream->ctx->dc->caps.vtotal_limited_by_fp2) { + max_hw_v_total -= stream->timing.v_front_porch + 1; + } + + return max_hw_v_total; +} + static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing, struct dc_stream_state *stream, struct dml2_context *dml_ctx) { - unsigned int hblank_start, vblank_start; + unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz; timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; @@ -371,11 +382,23 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf - stream->timing.v_border_top - stream->timing.v_border_bottom; timing->drr_config.enabled = stream->ignore_msa_timing_param; - timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz; timing->drr_config.drr_active_variable = stream->vrr_active_variable; timing->drr_config.drr_active_fixed = stream->vrr_active_fixed; timing->drr_config.disallowed = !stream->allow_freesync; + /* limit min refresh rate to DC cap */ + min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz; + if (stream->ctx->dc->caps.max_v_total != 0) { + min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL), + (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream))); + } + + if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) { + timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz; + } else { + timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz; + } + if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase && stream->ctx->dc->config.enable_fpo_flicker_detection == 1) timing->drr_config.max_instant_vtotal_delta = dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase(stream, false); @@ 
-422,6 +445,21 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf timing->vblank_nom = timing->v_total - timing->v_active; } +/** + * adjust_dml21_hblank_timing_config_from_pipe_ctx - Adjusts the horizontal blanking timing configuration + * based on the pipe context. + * @timing: Pointer to the dml2_timing_cfg structure to be adjusted. + * @pipe: Pointer to the pipe_ctx structure containing the horizontal blanking borrow value. + * + * This function modifies the horizontal active and blank end timings by adding and subtracting + * the horizontal blanking borrow value from the pipe context, respectively. + */ +static void adjust_dml21_hblank_timing_config_from_pipe_ctx(struct dml2_timing_cfg *timing, struct pipe_ctx *pipe) +{ + timing->h_active += pipe->hblank_borrow; + timing->h_blank_end -= pipe->hblank_borrow; +} + static void populate_dml21_output_config_from_stream_state(struct dml2_link_output_cfg *output, struct dc_stream_state *stream, const struct pipe_ctx *pipe) { @@ -709,6 +747,7 @@ static const struct scaler_data *get_scaler_data_for_plane( temp_pipe->plane_state = pipe->plane_state; temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps; temp_pipe->stream_res = pipe->stream_res; + temp_pipe->hblank_borrow = pipe->hblank_borrow; dml_ctx->config.callbacks.build_scaling_params(temp_pipe); break; } @@ -859,7 +898,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm plane->immediate_flip = plane_state->flip_immediate; plane->composition.rect_out_height_spans_vactive = - plane_state->dst_rect.height >= stream->timing.v_addressable && + plane_state->dst_rect.height >= stream->src.height && stream->dst.height >= stream->timing.v_addressable; } @@ -973,6 +1012,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__); populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx); + adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]); populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]); populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]); @@ -1036,6 +1076,7 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state context->bw_ctx.bw.dcn.clk.p_state_change_support = in_ctx->v21.mode_programming.programming->uclk_pstate_supported; context->bw_ctx.bw.dcn.clk.dtbclk_en = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz > 0; context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz; + context->bw_ctx.bw.dcn.clk.socclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.socclk_khz; } void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx) @@ -1110,12 +1151,12 @@ void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; union 
dml2_global_sync_programming *global_sync = &stream_programming->global_sync; - hactive = timing->h_addressable + timing->h_border_left + timing->h_border_right; + hactive = timing->h_addressable + timing->h_border_left + timing->h_border_right + pipe_ctx->hblank_borrow; vactive = timing->v_addressable + timing->v_border_bottom + timing->v_border_top; hblank_start = pipe_ctx->stream->timing.h_total - pipe_ctx->stream->timing.h_front_porch; vblank_start = pipe_ctx->stream->timing.v_total - pipe_ctx->stream->timing.v_front_porch; - hblank_end = hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right; + hblank_end = hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right - pipe_ctx->hblank_borrow; vblank_end = vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom; if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) { diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c index d35dd507cb9f..bbc28b9a15a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c @@ -13,11 +13,11 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx) { - *dml_ctx = (struct dml2_context *)kzalloc(sizeof(struct dml2_context), GFP_KERNEL); + *dml_ctx = kzalloc(sizeof(struct dml2_context), GFP_KERNEL); if (!(*dml_ctx)) return false; - (*dml_ctx)->v21.dml_init.dml2_instance = (struct dml2_instance *)kzalloc(sizeof(struct dml2_instance), GFP_KERNEL); + (*dml_ctx)->v21.dml_init.dml2_instance = kzalloc(sizeof(struct dml2_instance), GFP_KERNEL); if (!((*dml_ctx)->v21.dml_init.dml2_instance)) return false; @@ -27,7 +27,7 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx) (*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config; (*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config; - (*dml_ctx)->v21.mode_programming.programming = (struct dml2_display_cfg_programming *)kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL); + (*dml_ctx)->v21.mode_programming.programming = kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL); if (!((*dml_ctx)->v21.mode_programming.programming)) return false; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h index 83fc15bf13cf..25b607e7b726 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h @@ -88,6 +88,7 @@ struct dml2_display_arb_regs { uint32_t sdpif_request_rate_limit; uint32_t allow_sdpif_rate_limit_when_cstate_req; uint32_t dcfclk_deep_sleep_hysteresis; + uint32_t pstate_stall_threshold; }; struct dml2_cursor_dlg_regs{ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c index 0aa4e4d343b0..3d41ffde91c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c @@ -159,6 +159,7 @@ static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters phantom->timing.v_total = meta->v_total; phantom->timing.v_active = meta->v_active; 
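Two cleanups meet here: the dml21_wrapper.c hunk above drops the redundant casts on kzalloc() (it returns void *, which converts implicitly to any object pointer in C), and the phantom-stream setup gains an explicit v_blank_end, added on the next line and mirrored later in dml2_core_utils.c. The value follows from the usual timing decomposition: total blanking is v_total - v_active, and the front porch is the portion of it that trails active video. A hypothetical helper showing the identity the added line computes (illustrative only, not part of the patch):

	/* Illustrative: v_blank_end as derived for the phantom stream,
	 * i.e. total blanking minus the trailing front porch. */
	static unsigned int phantom_v_blank_end(unsigned int v_total,
						unsigned int v_active,
						unsigned int v_front_porch)
	{
		return v_total - v_front_porch - v_active;
	}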
phantom->timing.v_front_porch = meta->v_front_porch; + phantom->timing.v_blank_end = phantom->timing.v_total - phantom->timing.v_front_porch - phantom->timing.v_active; phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active; phantom->timing.drr_config.enabled = false; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c index 3ea54fd52e46..601320b1be81 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c @@ -11,6 +11,7 @@ #define DML2_MAX_FMT_420_BUFFER_WIDTH 4096 #define DML_MAX_NUM_OF_SLICES_PER_DSC 4 +#define ALLOW_SDPIF_RATE_LIMIT_PRE_CSTATE const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type) { @@ -3886,6 +3887,10 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch #endif *p->hw_debug5 = false; +#ifdef ALLOW_SDPIF_RATE_LIMIT_PRE_CSTATE + if (p->NumberOfActiveSurfaces > 1) + *p->hw_debug5 = true; +#else for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) { if (!(p->mrq_present) && (!(*p->UnboundedRequestEnabled)) && (TotalActiveDPP == 1) && p->display_cfg->plane_descriptors[k].surface.dcc.enable @@ -3901,6 +3906,7 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5); #endif } +#endif } static enum dml2_odm_mode DecideODMMode(unsigned int HActive, @@ -12236,6 +12242,8 @@ static void rq_dlg_get_dlg_reg( static void rq_dlg_get_arb_params(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_arb_regs *arb_param) { + double refclk_freq_in_mhz = (display_cfg->overrides.hw.dlg_ref_clk_mhz > 0) ? 
(double)display_cfg->overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz; + arb_param->max_req_outstanding = mode_lib->soc.max_outstanding_reqs; arb_param->min_req_outstanding = mode_lib->soc.max_outstanding_reqs; // turn off the sat level feature if this set to max arb_param->sdpif_request_rate_limit = (3 * mode_lib->ip.words_per_channel * mode_lib->soc.clk_table.dram_config.channel_count) / 4; @@ -12247,6 +12255,7 @@ static void rq_dlg_get_arb_params(const struct dml2_display_cfg *display_cfg, co arb_param->compbuf_size = mode_lib->mp.CompressedBufferSizeInkByte / mode_lib->ip.compressed_buffer_segment_size_in_kbytes; arb_param->allow_sdpif_rate_limit_when_cstate_req = dml_get_hw_debug5(mode_lib); arb_param->dcfclk_deep_sleep_hysteresis = dml_get_dcfclk_deep_sleep_hysteresis(mode_lib); + arb_param->pstate_stall_threshold = (unsigned int)(mode_lib->ip_caps.fams2.max_allow_delay_us * refclk_freq_in_mhz); #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c index ab229e1598ae..714b5c39b7e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c @@ -425,6 +425,7 @@ static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters phantom->timing.v_total = meta->v_total; phantom->timing.v_active = meta->v_active; phantom->timing.v_front_porch = meta->v_front_porch; + phantom->timing.v_blank_end = phantom->timing.v_total - phantom->timing.v_front_porch - phantom->timing.v_active; phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active; phantom->timing.drr_config.enabled = false; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c index 1cf9015e854a..92269f0e50ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c @@ -8,6 +8,7 @@ #include "dml2_pmo_dcn4_fams2.h" static const double MIN_VACTIVE_MARGIN_PCT = 0.25; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding +static const double MIN_BLANK_STUTTER_FACTOR = 3.0; static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = { // VActive Preferred @@ -1798,6 +1799,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp } if (s->pmo_dcn4.num_pstate_candidates > 0) { + s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.num_pstate_candidates - 1].allow_state_increase = true; s->pmo_dcn4.cur_pstate_candidate = -1; return true; } else { @@ -2139,6 +2141,7 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in struct dml2_pmo_instance *pmo = in_out->instance; bool stutter_period_meets_z8_eco = true; bool z8_stutter_optimization_too_expensive = false; + bool stutter_optimization_too_expensive = false; double line_time_us, vblank_nom_time_us; unsigned int i; @@ -2160,10 +2163,15 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in line_time_us = (double)in_out->base_display_config->display_config.stream_descriptors[i].timing.h_total / 
(in_out->base_display_config->display_config.stream_descriptors[i].timing.pixel_clock_khz * 1000) * 1000000; vblank_nom_time_us = line_time_us * in_out->base_display_config->display_config.stream_descriptors[i].timing.vblank_nom; - if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us) { + if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) { z8_stutter_optimization_too_expensive = true; break; } + + if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) { + stutter_optimization_too_expensive = true; + break; + } } pmo->scratch.pmo_dcn4.num_stutter_candidates = 0; @@ -2179,7 +2187,7 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in pmo->scratch.pmo_dcn4.z8_vblank_optimizable = false; } - if (pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) { + if (!stutter_optimization_too_expensive && pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) { pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.num_stutter_candidates] = (unsigned int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us; pmo->scratch.pmo_dcn4.num_stutter_candidates++; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index 6eccf0241d85..1ed21c1b86a5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -258,12 +258,25 @@ static unsigned int find_preferred_pipe_candidates(const struct dc_state *existi * However this condition comes with a caveat. We need to ignore pipes that will * require a change in OPP but still have the same stream id. For example during * an MPC to ODM transiton. + * + * Adding check to avoid pipe select on the head pipe by utilizing dc resource + * helper function resource_get_primary_dpp_pipe and comparing the pipe index. */ if (existing_state) { for (i = 0; i < pipe_count; i++) { if (existing_state->res_ctx.pipe_ctx[i].stream && existing_state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id) { + struct pipe_ctx *head_pipe = + resource_is_pipe_type(&existing_state->res_ctx.pipe_ctx[i], DPP_PIPE) ? + resource_get_primary_dpp_pipe(&existing_state->res_ctx.pipe_ctx[i]) : + NULL; + + // we should always respect the head pipe from selection + if (head_pipe && head_pipe->pipe_idx == i) + continue; if (existing_state->res_ctx.pipe_ctx[i].plane_res.hubp && - existing_state->res_ctx.pipe_ctx[i].plane_res.hubp->opp_id != i) + existing_state->res_ctx.pipe_ctx[i].plane_res.hubp->opp_id != i && + (existing_state->res_ctx.pipe_ctx[i].prev_odm_pipe || + existing_state->res_ctx.pipe_ctx[i].next_odm_pipe)) continue; preferred_pipe_candidates[num_preferred_candidates++] = i; @@ -292,6 +305,14 @@ static unsigned int find_last_resort_pipe_candidates(const struct dc_state *exis */ if (existing_state) { for (i = 0; i < pipe_count; i++) { + struct pipe_ctx *head_pipe = + resource_is_pipe_type(&existing_state->res_ctx.pipe_ctx[i], DPP_PIPE) ? 
+ resource_get_primary_dpp_pipe(&existing_state->res_ctx.pipe_ctx[i]) : + NULL; + + // we should always respect the head pipe from selection + if (head_pipe && head_pipe->pipe_idx == i) + continue; if ((existing_state->res_ctx.pipe_ctx[i].plane_res.hubp && existing_state->res_ctx.pipe_ctx[i].plane_res.hubp->opp_id != i) || existing_state->res_ctx.pipe_ctx[i].stream_res.tg) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c index 11c904ae2958..c4c52173ef22 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c @@ -303,6 +303,7 @@ void build_unoptimized_policy_settings(enum dml_project_id project, struct dml_m if (project == dml_project_dcn35 || project == dml_project_dcn351) { policy->DCCProgrammingAssumesScanDirectionUnknownFinal = false; + policy->EnhancedPrefetchScheduleAccelerationFinal = 0; policy->AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter_if_possible; /*new*/ policy->UseOnlyMaxPrefetchModes = 1; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 866b0abcff1b..9190c1328d5b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -209,8 +209,6 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe p->cur_display_config->output.OutputEncoder[0], p->cur_mode_support_info->DSCEnabled[0]) - 1; if (odms_needed <= unused_dpps) { - unused_dpps -= odms_needed; - if (odms_needed == 1) { p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_2to1; optimization_done = true; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h index cd1706d301e7..f09cba8e29cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h @@ -690,6 +690,7 @@ struct dcn20_dpp { int lb_memory_size; int lb_bits_per_entry; bool is_write_to_ram_a_safe; + bool dispclk_r_gate_disable; struct scaler_data scl_data; struct pwl_params pwl_data; }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h index b110f35ef66b..f236824126e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h @@ -572,6 +572,7 @@ struct dcn3_dpp { int lb_memory_size; int lb_bits_per_entry; bool is_write_to_ram_a_safe; + bool dispclk_r_gate_disable; struct scaler_data scl_data; struct pwl_params pwl_data; }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c index 8473c694bfdc..62b7012cda43 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c @@ -50,13 +50,21 @@ void dpp35_dppclk_control( DPPCLK_RATE_CONTROL, dppclk_div, DPP_CLOCK_ENABLE, 1); else - REG_UPDATE_2(DPP_CONTROL, + if (dpp->dispclk_r_gate_disable) + REG_UPDATE_2(DPP_CONTROL, DPP_CLOCK_ENABLE, 1, DISPCLK_R_GATE_DISABLE, 1); + else + REG_UPDATE(DPP_CONTROL, + DPP_CLOCK_ENABLE, 1); } else - REG_UPDATE_2(DPP_CONTROL, + if (dpp->dispclk_r_gate_disable) + REG_UPDATE_2(DPP_CONTROL, DPP_CLOCK_ENABLE, 0, DISPCLK_R_GATE_DISABLE, 0); + else + REG_UPDATE(DPP_CONTROL, + DPP_CLOCK_ENABLE, 0); } void dpp35_program_bias_and_scale_fcnv( @@ -128,6 +136,10 @@ bool 
dpp35_construct( (const struct dcn3_dpp_mask *)(tf_mask)); dpp->base.funcs = &dcn35_dpp_funcs; + + // w/a for cursor memory stuck in LS by programming DISPCLK_R_GATE_DISABLE, limit w/a to some ASIC revs + if (dpp->base.ctx->asic_id.hw_internal_rev <= 0x10) + dpp->dispclk_r_gate_disable = true; return ret; } diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c index 5105fd580017..2f92e7d4981b 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c @@ -1091,7 +1091,8 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base, /* ISHARP_DELTA_LUT */ dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta); dpp->scl_data.dscl_prog_data.sharpness_level = scl_data->dscl_prog_data.sharpness_level; - dpp->scl_data.dscl_prog_data.isharp_delta = scl_data->dscl_prog_data.isharp_delta; + memcpy(dpp->scl_data.dscl_prog_data.isharp_delta, scl_data->dscl_prog_data.isharp_delta, + sizeof(uint32_t) * ISHARP_LUT_TABLE_SIZE); if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0) return; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index ebd5df1a36e8..d9aaebfa3a0a 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -1093,14 +1093,11 @@ static bool setup_dsc_config( if (!is_dsc_possible) goto done; - // Final decission: can we do DSC or not? - if (is_dsc_possible) { - // Fill out the rest of DSC settings - dsc_cfg->block_pred_enable = dsc_common_caps.is_block_pred_supported; - dsc_cfg->linebuf_depth = dsc_common_caps.lb_bit_depth; - dsc_cfg->version_minor = (dsc_common_caps.dsc_version & 0xf0) >> 4; - dsc_cfg->is_dp = dsc_sink_caps->is_dp; - } + /* Fill out the rest of DSC settings */ + dsc_cfg->block_pred_enable = dsc_common_caps.is_block_pred_supported; + dsc_cfg->linebuf_depth = dsc_common_caps.lb_bit_depth; + dsc_cfg->version_minor = (dsc_common_caps.dsc_version & 0xf0) >> 4; + dsc_cfg->is_dp = dsc_sink_caps->is_dp; done: if (!is_dsc_possible) diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h index bd98b327a6c7..b86347c9b038 100644 --- a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h +++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h @@ -63,10 +63,6 @@ bool cm3_helper_translate_curve_to_hw_format( const struct dc_transfer_func *output_tf, struct pwl_params *lut_params, bool fixpoint); -bool cm3_helper_translate_curve_to_degamma_hw_format( - const struct dc_transfer_func *output_tf, - struct pwl_params *lut_params); - bool cm3_helper_convert_to_custom_float( struct pwl_result_data *rgb_resulted, struct curve_points3 *corner_points, diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index f344478e9bd4..b099989d9364 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -443,7 +443,6 @@ struct gpio *dal_gpio_create_irq( case GPIO_ID_GPIO_PAD: break; default: - id = GPIO_ID_HPD; ASSERT_CRITICAL(false); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h index a1e2cde9c4cc..9fbd45c7dfef 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h +++ 
b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h @@ -198,6 +198,9 @@ struct dcn_hubbub_registers { uint32_t DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_MALL_A; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_MALL_B; + uint32_t DCHUBBUB_TIMEOUT_DETECTION_CTRL1; + uint32_t DCHUBBUB_TIMEOUT_DETECTION_CTRL2; + uint32_t DCHUBBUB_CTRL_STATUS; }; #define HUBBUB_REG_FIELD_LIST_DCN32(type) \ @@ -313,7 +316,17 @@ struct dcn_hubbub_registers { type DCN_VM_ERROR_VMID;\ type DCN_VM_ERROR_TABLE_LEVEL;\ type DCN_VM_ERROR_PIPE;\ - type DCN_VM_ERROR_INTERRUPT_STATUS + type DCN_VM_ERROR_INTERRUPT_STATUS;\ + type DCHUBBUB_TIMEOUT_ERROR_STATUS;\ + type DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD;\ + type DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD;\ + type DCHUBBUB_TIMEOUT_DETECTION_EN;\ + type DCHUBBUB_TIMEOUT_TIMER_RESET;\ + type ROB_UNDERFLOW_STATUS;\ + type ROB_OVERFLOW_STATUS;\ + type ROB_OVERFLOW_CLEAR;\ + type DCHUBBUB_HW_DEBUG;\ + type CSTATE_SWATH_CHK_GOOD_MODE #define HUBBUB_STUTTER_REG_FIELD_LIST(type) \ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\ diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h index 036bb3e6c957..46d8f5c70750 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h @@ -96,6 +96,7 @@ struct dcn20_hubbub { unsigned int det1_size; unsigned int det2_size; unsigned int det3_size; + bool allow_sdpif_rate_limit_when_cstate_req; }; void hubbub2_construct(struct dcn20_hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c index 37d26fa0b6fb..92fab471b183 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c @@ -1192,6 +1192,37 @@ static void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst) } } +static bool dcn401_program_arbiter(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + bool wm_pending = false; + uint32_t temp; + + /* request backpressure and outstanding return threshold (unused)*/ + //REG_UPDATE(DCHUBBUB_TIMEOUT_DETECTION_CTRL1, DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD, arb_regs->req_stall_threshold); + + /* P-State stall threshold */ + REG_UPDATE(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD, arb_regs->pstate_stall_threshold); + + if (safe_to_lower || arb_regs->allow_sdpif_rate_limit_when_cstate_req > hubbub2->allow_sdpif_rate_limit_when_cstate_req) { + hubbub2->allow_sdpif_rate_limit_when_cstate_req = arb_regs->allow_sdpif_rate_limit_when_cstate_req; + + /* only update the required bits */ + REG_GET(DCHUBBUB_CTRL_STATUS, DCHUBBUB_HW_DEBUG, &temp); + if (hubbub2->allow_sdpif_rate_limit_when_cstate_req) { + temp |= (1 << 5); + } else { + temp &= ~(1 << 5); + } + REG_UPDATE(DCHUBBUB_CTRL_STATUS, DCHUBBUB_HW_DEBUG, temp); + } else { + wm_pending = true; + } + + return wm_pending; +} + static const struct hubbub_funcs hubbub4_01_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx, @@ -1215,6 +1246,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = { .program_det_segments = dcn401_program_det_segments, .program_compbuf_segments = dcn401_program_compbuf_segments, .wait_for_det_update = dcn401_wait_for_det_update, + .program_arbiter = 
dcn401_program_arbiter, }; void hubbub401_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h index f35f19ba3e18..b1d9ea9d1c3d 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h @@ -123,8 +123,17 @@ HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\ HUBBUB_SF(DCHUBBUB_SDPIF_CFG1, SDPIF_MAX_NUM_OUTSTANDING, mask_sh),\ - HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh) - + HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL1, DCHUBBUB_TIMEOUT_ERROR_STATUS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL1, DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_DETECTION_EN, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_TIMER_RESET, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CTRL_STATUS, ROB_UNDERFLOW_STATUS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CTRL_STATUS, ROB_OVERFLOW_STATUS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CTRL_STATUS, ROB_OVERFLOW_CLEAR, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CTRL_STATUS, DCHUBBUB_HW_DEBUG, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CTRL_STATUS, CSTATE_SWATH_CHK_GOOD_MODE, mask_sh) bool hubbub401_program_urgent_watermarks( struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 4fbed0298adf..81f4c386c287 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -1039,7 +1039,8 @@ void dce110_edp_backlight_control( link_transmitter_control(ctx->dc_bios, &cntl); if (enable && link->dpcd_sink_ext_caps.bits.oled && - !link->dc->config.edp_no_power_sequencing) { + !link->dc->config.edp_no_power_sequencing && + !link->local_sink->edid_caps.panel_patch.oled_optimize_display_on) { post_T7_delay += link->panel_config.pps.extra_post_t7_ms; msleep(post_T7_delay); } @@ -3142,9 +3143,10 @@ static void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx) } bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp) + struct set_backlight_level_params *backlight_level_params) { + uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16; + uint32_t frame_ramp = backlight_level_params->frame_ramp; struct dc_link *link = pipe_ctx->stream->link; struct dc *dc = link->ctx->dc; struct abm *abm = pipe_ctx->stream_res.abm; @@ -3315,7 +3317,7 @@ void dce110_disable_link_output(struct dc_link *link, * from enable/disable link output and only call edp panel control * in enable_link_dp and disable_link_dp once. 
*/ - if (dmcu != NULL && dmcu->funcs->lock_phy) + if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h index ed3cc3648e8e..06789ac3a224 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h @@ -88,8 +88,7 @@ void dce110_edp_wait_for_hpd_ready( bool power_up); bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *params); void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx); void dce110_set_pipe(struct pipe_ctx *pipe_ctx); void dce110_disable_link_output(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index a6a1db5ba8ba..681bb92c6069 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -3453,7 +3453,6 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) r2 = test_pipe->plane_res.scl_data.recout; r2_r = r2.x + r2.width; r2_b = r2.y + r2.height; - split_pipe = test_pipe; /** * There is another half plane on same layer because of diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index a80c08582932..b029ec1b26d3 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -1458,8 +1458,12 @@ void dcn20_pipe_control_lock( } else { if (lock) pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg); - else - pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); + else { + if (dc->hwseq->funcs.perform_3dlut_wa_unlock) + dc->hwseq->funcs.perform_3dlut_wa_unlock(pipe); + else + pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); + } } } @@ -1732,7 +1736,6 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.scaler || plane_state->update_flags.bits.scaling_change || plane_state->update_flags.bits.position_change || - plane_state->update_flags.bits.clip_size_change || plane_state->update_flags.bits.per_pixel_alpha_change || pipe_ctx->stream->update_flags.bits.scaling) { pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; @@ -1745,7 +1748,6 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.viewport || (context == dc->current_state && plane_state->update_flags.bits.position_change) || (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || - (context == dc->current_state && plane_state->update_flags.bits.clip_size_change) || (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) { hubp->funcs->mem_program_viewport( @@ -1923,9 +1925,9 @@ static void dcn20_program_pipe( dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size); } - if (pipe_ctx->update_flags.raw || - (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) || - pipe_ctx->stream->update_flags.raw) + if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw || + pipe_ctx->plane_state->update_flags.raw || + pipe_ctx->stream->update_flags.raw)) dcn20_update_dchubp_dpp(dc, pipe_ctx, context); if 
(pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable || @@ -2056,22 +2058,15 @@ void dcn20_program_front_end_for_ctx( */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream; + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream && - dc_state_get_pipe_subvp_type(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { + dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) { struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg; if (tg->funcs->enable_crtc) { - if (dc->hwss.blank_phantom) { - int main_pipe_width = 0, main_pipe_height = 0; - struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(dc->current_state, dc->current_state->res_ctx.pipe_ctx[i].stream); - - if (phantom_stream) { - main_pipe_width = phantom_stream->dst.width; - main_pipe_height = phantom_stream->dst.height; - } - - dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); + if (dc->hwseq->funcs.blank_pixel_data) { + dc->hwseq->funcs.blank_pixel_data(dc, pipe, true); } tg->funcs->enable_crtc(tg); } @@ -2255,9 +2250,9 @@ void dcn20_post_unlock_program_front_end( struct timing_generator *tg = pipe->stream_res.tg; - if (tg->funcs->get_double_buffer_pending) { + if (tg->funcs->get_optc_double_buffer_pending) { for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us - && tg->funcs->get_double_buffer_pending(tg); j++) + && tg->funcs->get_optc_double_buffer_pending(tg); j++) udelay(polling_interval_us); } } @@ -2771,7 +2766,6 @@ void dcn20_reset_back_end_for_pipe( struct pipe_ctx *pipe_ctx, struct dc_state *context) { - int i; struct dc_link *link = pipe_ctx->stream->link; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); @@ -2838,19 +2832,16 @@ void dcn20_reset_back_end_for_pipe( } } - for (i = 0; i < dc->res_pool->pipe_count; i++) - if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx) - break; - - if (i == dc->res_pool->pipe_count) - return; - /* * In case of a dangling plane, setting this to NULL unconditionally * causes failures during reset hw ctx where, if stream is NULL, * it is expected that the pipe_ctx pointers to pipes and plane are NULL. 
*/ pipe_ctx->stream = NULL; + pipe_ctx->top_pipe = NULL; + pipe_ctx->bottom_pipe = NULL; + pipe_ctx->next_odm_pipe = NULL; + pipe_ctx->prev_odm_pipe = NULL; DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n", pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c index 1ea95f8d4cbc..61efb15572ff 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c @@ -137,7 +137,7 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx) pipe_ctx->stream->dpms_off = true; } -static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, +bool dcn21_dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst) { union dmub_rb_cmd cmd; @@ -199,7 +199,7 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst, panel_cntl->pwrseq_inst); } else { - dmub_abm_set_pipe(abm, + dcn21_dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst, @@ -234,7 +234,7 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx) panel_cntl->inst, panel_cntl->pwrseq_inst); } else { - dmub_abm_set_pipe(abm, otg_inst, + dcn21_dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst, panel_cntl->pwrseq_inst); @@ -242,14 +242,15 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx) } bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp) + struct set_backlight_level_params *backlight_level_params) { struct dc_context *dc = pipe_ctx->stream->ctx; struct abm *abm = pipe_ctx->stream_res.abm; struct timing_generator *tg = pipe_ctx->stream_res.tg; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; uint32_t otg_inst; + uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16; + uint32_t frame_ramp = backlight_level_params->frame_ramp; if (!abm || !tg || !panel_cntl) return false; @@ -257,7 +258,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, otg_inst = tg->inst; if (dc->dc->res_pool->dmcu) { - dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp); + dce110_set_backlight_level(pipe_ctx, backlight_level_params); return true; } @@ -268,7 +269,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, panel_cntl->inst, panel_cntl->pwrseq_inst); } else { - dmub_abm_set_pipe(abm, + dcn21_dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h index 9cee9bdb8de9..f72a27ac1bf1 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h @@ -47,11 +47,12 @@ void dcn21_optimize_pwr_state( void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx); +bool dcn21_dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, + uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst); void dcn21_set_pipe(struct pipe_ctx *pipe_ctx); void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx); bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *params); bool dcn21_is_abm_supported(struct dc *dc, struct 
dc_state *context, struct dc_stream_state *stream); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index bded33575493..e89ebfda4873 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -245,6 +245,7 @@ static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx, { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; int mpcc_id = pipe_ctx->plane_res.hubp->inst; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; bool result = false; int acquired_rmu = 0; @@ -283,8 +284,14 @@ static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx, result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d, stream->lut3d_func->state.bits.rmu_mux_num); + if (!result) + DC_LOG_ERROR("%s: program_3dlut failed\n", __func__); + result = mpc->funcs->program_shaper(mpc, shaper_lut, stream->lut3d_func->state.bits.rmu_mux_num); + if (!result) + DC_LOG_ERROR("%s: program_shaper failed\n", __func__); + } else { // loop through the available mux and release the requested mpcc_id mpc->funcs->release_rmu(mpc, mpcc_id); @@ -486,7 +493,6 @@ bool dcn30_mmhubbub_warmup( } /*following is the original: warmup each DWB's mcif buffer*/ for (i = 0; i < num_dwb; i++) { - dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst]; mcif_wb = dc->res_pool->mcif_wb[wb_info[i].dwb_pipe_inst]; /*warmup is for VM mode only*/ if (wb_info[i].mcif_buf_params.p_vmid == 0) @@ -1185,3 +1191,30 @@ void dcn30_prepare_bandwidth(struct dc *dc, if (!dc->clk_mgr->clks.fw_based_mclk_switching) dc_dmub_srv_p_state_delegate(dc, false, context); } + +void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx) +{ + struct timing_generator *tg = pipe_ctx->stream_res.tg; + bool pending_updates = false; + unsigned int i; + + if (tg && tg->funcs->is_tg_enabled(tg)) { + // Poll for 100ms maximum + for (i = 0; i < 100000; i++) { + pending_updates = false; + if (tg->funcs->get_optc_double_buffer_pending) + pending_updates |= tg->funcs->get_optc_double_buffer_pending(tg); + + if (tg->funcs->get_otg_double_buffer_pending) + pending_updates |= tg->funcs->get_otg_double_buffer_pending(tg); + + if (tg->funcs->get_pipe_update_pending && pipe_ctx->plane_state) + pending_updates |= tg->funcs->get_pipe_update_pending(tg); + + if (!pending_updates) + break; + + udelay(1); + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h index 6a153e7ce910..4b90b781c4f2 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h @@ -96,4 +96,6 @@ void dcn30_set_hubp_blank(const struct dc *dc, void dcn30_prepare_bandwidth(struct dc *dc, struct dc_state *context); +void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx); + #endif /* __DC_HWSS_DCN30_H__ */
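The dcn30_wait_for_all_pending_updates() helper added above bounds its poll at 100,000 iterations of udelay(1), roughly 100 ms, OR-ing together the OPTC, OTG, and pipe pending flags on each pass and bailing out as soon as all are clear. A minimal caller-side sketch of the new hook, assuming only the hw_sequencer_funcs wiring from this series (the surrounding function is illustrative, not part of the patch):

/*
 * Hypothetical caller: drain double-buffered updates before touching
 * OTG/OPTC state. Only dc->hwss.wait_for_all_pending_updates is real;
 * everything else here is an illustrative sketch.
 */
static void example_wait_then_reprogram(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	/* Optional hook; only DCN30/301/32/401 wire it up in this series. */
	if (dc->hwss.wait_for_all_pending_updates)
		dc->hwss.wait_for_all_pending_updates(pipe_ctx);

	/* ...double-buffered OTG/OPTC registers can be reprogrammed here... */
}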
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c index 2a8dc40d2847..0e8d32e3dbae 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c @@ -108,7 +108,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, .update_visual_confirm_color = dcn10_update_visual_confirm_color, - .is_abm_supported = dcn21_is_abm_supported + .is_abm_supported = dcn21_is_abm_supported, + .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, }; static const struct hwseq_private_funcs dcn30_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c index 93e49d87a67c..780ce4c064aa 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c @@ -107,6 +107,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = { .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .update_visual_confirm_color = dcn10_update_visual_confirm_color, + .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, }; static const struct hwseq_private_funcs dcn301_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h index 0bca48ccbfa2..a6e0115a53ee 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h @@ -23,8 +23,8 @@ * */ -#ifndef __DC_DCN30_INIT_H__ -#define __DC_DCN30_INIT_H__ +#ifndef __DC_DCN301_INIT_H__ +#define __DC_DCN301_INIT_H__ struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 3d4b31bd9946..03ba01f4ace1 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -47,9 +47,11 @@ #include "dce/dmub_outbox.h" #include "link.h" #include "dcn10/dcn10_hwseq.h" +#include "dcn21/dcn21_hwseq.h" #include "inc/link_enc_cfg.h" #include "dcn30/dcn30_vpg.h" #include "dce/dce_i2c_hw.h" +#include "dce/dmub_abm_lcd.h" #define DC_LOGGER_INIT(logger) @@ -517,10 +519,18 @@ static void dcn31_reset_back_end_for_pipe( dc->hwss.set_abm_immediate_disable(pipe_ctx); + link = pipe_ctx->stream->link; + + if ((!pipe_ctx->stream->dpms_off || link->link_status.link_active) && + (link->connector_signal == SIGNAL_TYPE_EDP)) + dc->hwss.blank_stream(pipe_ctx); + pipe_ctx->stream_res.tg->funcs->set_dsc_config( pipe_ctx->stream_res.tg, OPTC_DSC_DISABLED, 0, 0); + pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); + pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass) pipe_ctx->stream_res.tg->funcs->set_odm_bypass( @@ -532,7 +542,6 @@ static void dcn31_reset_back_end_for_pipe( pipe_ctx->stream_res.tg->funcs->set_drr( pipe_ctx->stream_res.tg, NULL); - link = pipe_ctx->stream->link; /* DPMS may already disable or */ /* dpms_off status is incorrect due to fastboot * feature.
When system resume from S4 with second @@ -633,3 +642,51 @@ void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg, triggers, params->num_frames); } + +static void dmub_abm_set_backlight(struct dc_context *dc, + struct set_backlight_level_params *backlight_level_params, uint32_t panel_inst) +{ + union dmub_rb_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_set_backlight.header.type = DMUB_CMD__ABM; + cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT; + cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = backlight_level_params->frame_ramp; + cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_level_params->backlight_pwm_u16_16; + cmd.abm_set_backlight.abm_set_backlight_data.backlight_control_type = + (enum dmub_backlight_control_type) backlight_level_params->control_type; + cmd.abm_set_backlight.abm_set_backlight_data.min_luminance = backlight_level_params->min_luminance; + cmd.abm_set_backlight.abm_set_backlight_data.max_luminance = backlight_level_params->max_luminance; + cmd.abm_set_backlight.abm_set_backlight_data.min_backlight_pwm = backlight_level_params->min_backlight_pwm; + cmd.abm_set_backlight.abm_set_backlight_data.max_backlight_pwm = backlight_level_params->max_backlight_pwm; + cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); + cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); + + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx, + struct set_backlight_level_params *backlight_level_params) +{ + struct dc_context *dc = pipe_ctx->stream->ctx; + struct abm *abm = pipe_ctx->stream_res.abm; + struct timing_generator *tg = pipe_ctx->stream_res.tg; + struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; + uint32_t otg_inst; + + if (!abm || !tg || !panel_cntl) + return false; + + otg_inst = tg->inst; + + dcn21_dmub_abm_set_pipe(abm, + otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); + + dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst); + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h index b8bc939da155..0d09aa8cfb65 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h @@ -51,6 +51,8 @@ int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_ void dcn31_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context); +bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx, + struct set_backlight_level_params *params); bool dcn31_is_abm_supported(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); void dcn31_init_pipes(struct dc *dc, struct dc_state *context); @@ -59,5 +61,4 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, int num_pipes, const struct dc_static_screen_params *params); - #endif /* __DC_HWSS_DCN31_H__ */
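dcn31_set_backlight_level() above first routes the ABM pipe setup through dcn21_dmub_abm_set_pipe() and then sends a single DMUB_CMD__ABM_SET_BACKLIGHT command carrying the whole parameter bundle. Since the hook now takes struct set_backlight_level_params instead of a (backlight_pwm_u16_16, frame_ramp) pair, callers pack the fields up front; a minimal sketch using only fields visible in this series, with illustrative values:

/*
 * Illustrative caller of the reworked hook; the values are examples.
 * backlight_pwm_u16_16 is unsigned 16.16 fixed point, with 0x10000
 * conventionally meaning 100%.
 */
static bool example_set_half_brightness(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct set_backlight_level_params params = {0};

	params.backlight_pwm_u16_16 = 0x8000;	/* ~50% */
	params.frame_ramp = 0;			/* apply immediately, no ramp */

	if (dc->hwss.set_backlight_level)
		return dc->hwss.set_backlight_level(pipe_ctx, &params);

	return false;
}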
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c index 56f3c70d4b55..5f8f45b48720 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c @@ -98,7 +98,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c index 4e93eeedfc1b..9b88eb72086d 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c @@ -355,6 +355,20 @@ void dcn314_calculate_pix_rate_divider( } } +static bool dcn314_is_pipe_dig_fifo_on(struct pipe_ctx *pipe) +{ + return pipe && pipe->stream + // Check dig's otg instance. + && pipe->stream_res.stream_enc + && pipe->stream_res.stream_enc->funcs->dig_source_otg + && pipe->stream_res.tg->inst == pipe->stream_res.stream_enc->funcs->dig_source_otg(pipe->stream_res.stream_enc) + && pipe->stream->link && pipe->stream->link->link_enc + && pipe->stream->link->link_enc->funcs->is_dig_enabled + && pipe->stream->link->link_enc->funcs->is_dig_enabled(pipe->stream->link->link_enc) + && pipe->stream_res.stream_enc->funcs->is_fifo_enabled + && pipe->stream_res.stream_enc->funcs->is_fifo_enabled(pipe->stream_res.stream_enc); +} + void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx) { unsigned int i; @@ -371,7 +385,11 @@ void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc if (pipe->top_pipe || pipe->prev_odm_pipe) continue; - if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) { + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) && + !pipe->stream->apply_seamless_boot_optimization && + !pipe->stream->apply_edp_fast_boot_optimization) { + if (dcn314_is_pipe_dig_fifo_on(pipe)) + continue; pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); otg_disabled[i] = true; @@ -478,7 +496,7 @@ void dcn314_disable_link_output(struct dc_link *link, * from enable/disable link output and only call edp panel control * in enable_link_dp and disable_link_dp once.
*/ - if (dmcu != NULL && dmcu->funcs->lock_phy) + if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c index 68e6de6b5758..6bdfbf22ce87 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c @@ -100,7 +100,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 2e8c9f738259..fa11f075d1f9 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -439,6 +439,7 @@ bool dcn32_set_mpc_shaper_3dlut( { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; int mpcc_id = pipe_ctx->plane_res.hubp->inst; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; bool result = false; @@ -458,13 +459,13 @@ bool dcn32_set_mpc_shaper_3dlut( if (stream->lut3d_func && stream->lut3d_func->state.bits.initialized == 1) { - result = mpc->funcs->program_3dlut(mpc, - &stream->lut3d_func->lut_3d, - mpcc_id); + result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d, mpcc_id); + if (!result) + DC_LOG_ERROR("%s: program_3dlut failed\n", __func__); - result = mpc->funcs->program_shaper(mpc, - shaper_lut, - mpcc_id); + result = mpc->funcs->program_shaper(mpc, shaper_lut, mpcc_id); + if (!result) + DC_LOG_ERROR("%s: program_shaper failed\n", __func__); } return result; @@ -1048,7 +1049,8 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) } /* Enable DSC hw block */ - dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; + dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow + + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; @@ -1398,10 +1400,10 @@ void dcn32_disable_link_output(struct dc_link *link, link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; if (signal == SIGNAL_TYPE_EDP && - link->dc->hwss.edp_backlight_control && + link->dc->hwss.edp_power_control && !link->skip_implict_edp_power_control) link->dc->hwss.edp_power_control(link, false); - else if (dmcu != NULL && dmcu->funcs->lock_phy) + else if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); @@ -1698,52 +1700,6 @@ void dcn32_init_blank( hws->funcs.wait_for_blank_complete(opp); } -void dcn32_blank_phantom(struct dc *dc, - struct timing_generator *tg, - int width, - int height) -{ - struct 
dce_hwseq *hws = dc->hwseq; - enum dc_color_space color_space; - struct tg_color black_color = {0}; - struct output_pixel_processor *opp = NULL; - uint32_t num_opps, opp_id_src0, opp_id_src1; - uint32_t otg_active_width, otg_active_height; - uint32_t i; - - /* program opp dpg blank color */ - color_space = COLOR_SPACE_SRGB; - color_space_to_black_color(dc, color_space, &black_color); - - otg_active_width = width; - otg_active_height = height; - - /* get the OPTC source */ - tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); - ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp); - - for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { - if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) { - opp = dc->res_pool->opps[i]; - break; - } - } - - if (opp && opp->funcs->opp_set_disp_pattern_generator) - opp->funcs->opp_set_disp_pattern_generator( - opp, - CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, - CONTROLLER_DP_COLOR_SPACE_UDEFINED, - COLOR_DEPTH_UNDEFINED, - &black_color, - otg_active_width, - otg_active_height, - 0); - - if (tg->funcs->is_tg_enabled(tg)) - hws->funcs.wait_for_blank_complete(opp); -} - /* phantom stream id's can change often, but can be identical between contexts. * This function checks for the condition the streams are identical to avoid * redundant pipe transitions. diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h index cac4a08b92a4..0303a5953673 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h @@ -119,11 +119,6 @@ void dcn32_init_blank( struct dc *dc, struct timing_generator *tg); -void dcn32_blank_phantom(struct dc *dc, - struct timing_generator *tg, - int width, - int height); - bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, const struct dc_state *cur_ctx, const struct dc_state *new_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c index 3422b564ae98..5ecee7e320da 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c @@ -98,7 +98,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .calc_vupdate_position = dcn10_calc_vupdate_position, .apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations, .does_plane_fit_in_mall = NULL, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .hardware_release = dcn30_hardware_release, .set_pipe = dcn21_set_pipe, @@ -117,10 +117,10 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .update_phantom_vp_position = dcn32_update_phantom_vp_position, .update_dsc_pg = dcn32_update_dsc_pg, .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, - .blank_phantom = dcn32_blank_phantom, .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless, .calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider, .program_outstanding_updates = dcn32_program_outstanding_updates, + .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, }; static const struct hwseq_private_funcs dcn32_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index bd309dbdf7b2..e599cdc465bf 100644 --- 
a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -309,6 +309,7 @@ void dcn35_init_hw(struct dc *dc) dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; + dc->caps.dmub_caps.aux_backlight_support = dc->ctx->dmub_srv->dmub->feature_caps.abm_aux_backlight_support; } if (dc->res_pool->pg_cntl) { @@ -841,6 +842,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) uint32_t num_opps = 0; uint32_t opp_id_src0 = OPP_ID_INVALID; uint32_t opp_id_src1 = OPP_ID_INVALID; + uint32_t optc_dsc_state = 0; // Step 1: To find out which OPTC is running & OPTC DSC is ON // We can't use res_pool->res_cap->num_timing_generator to check @@ -849,7 +851,6 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) // Some ASICs would be fused to have fewer display pipes than the default setting. // In the dcnxx_resource_construct function, the driver would obtain the real information. for (i = 0; i < dc->res_pool->timing_generator_count; i++) { - uint32_t optc_dsc_state = 0; struct timing_generator *tg = dc->res_pool->timing_generators[i]; if (tg->funcs->is_tg_enabled(tg)) { @@ -864,15 +865,18 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) } } - // Step 2: To power down DSC but skip DSC of running OPTC + // Step 2: To power down DSC but skip DSC of running OPTC for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { struct dcn_dsc_state s = {0}; + /* avoid reading DSC state when it is not in use as it may be power gated */ + if (optc_dsc_state) { + dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s); - dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s); - if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) && - s.dsc_clock_en && s.dsc_fw_en) - continue; + if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) && + s.dsc_clock_en && s.dsc_fw_en) + continue; + } pg_cntl->funcs->dsc_pg_control(pg_cntl, dc->res_pool->dscs[i]->inst, false); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index 2bbf1fef94fd..fd67779c27a9 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -101,7 +101,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, @@ -123,7 +123,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .root_clock_control = dcn35_root_clock_control, .set_long_vtotal = dcn35_set_long_vblank, .calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider, - .program_outstanding_updates = dcn32_program_outstanding_updates, }; static const struct hwseq_private_funcs dcn35_private_funcs = {
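dcn35_init_hw() above now latches the firmware's abm_aux_backlight_support flag into dc->caps.dmub_caps.aux_backlight_support, and edp_set_backlight_level_nits() in link_edp_panel_control.c (further below) keys off that cap when picking a backlight path. Reduced to its decision structure, using only the fields shown in this series (bodies elided; see the actual hunk below):

/* Sketch of the path selection in edp_set_backlight_level_nits(). */
static void example_pick_backlight_path(struct dc_link *link)
{
	if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX &&
	    !link->dc->caps.dmub_caps.aux_backlight_support) {
		/* VESA AUX path: program target-luminance DPCD directly */
	} else if (link->backlight_control_type == BACKLIGHT_CONTROL_AMD_AUX) {
		/* AMD AUX path: program DP_SOURCE_BACKLIGHT_* DPCD */
	}
}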
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index d00822e8daa5..3c275a1eff58 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -100,7 +100,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, @@ -122,7 +122,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .root_clock_control = dcn35_root_clock_control, .set_long_vtotal = dcn35_set_long_vblank, .calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider, - .program_outstanding_updates = dcn32_program_outstanding_updates, .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, }; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 0b743669f23b..307782592789 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -506,7 +506,7 @@ void dcn401_populate_mcm_luts(struct dc *dc, dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable); /* 1D LUT */ - if (mcm_luts.lut1d_func && lut3d_xable != MCM_LUT_DISABLE) { + if (mcm_luts.lut1d_func) { memset(&m_lut_params, 0, sizeof(m_lut_params)); if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL) m_lut_params.pwl = &mcm_luts.lut1d_func->pwl; @@ -521,7 +521,7 @@ void dcn401_populate_mcm_luts(struct dc *dc, mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id); } if (mpc->funcs->program_lut_mode) - mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable, lut_bank_a, mpcc_id); + mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id); } /* Shaper */ @@ -669,11 +669,17 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx, { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; int mpcc_id = pipe_ctx->plane_res.hubp->inst; - struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; + struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc; + struct mpc *mpc = dc->res_pool->mpc; bool result; const struct pwl_params *lut_params = NULL; bool rval; + if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { + dcn401_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a); + return true; + } + mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id); pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE; // 1D LUT @@ -814,6 +820,7 @@ enum dc_status dcn401_enable_stream_timing( int opp_cnt = 1; int opp_inst[MAX_PIPES] = {0}; struct pipe_ctx *opp_heads[MAX_PIPES] = {0}; + struct dc_crtc_timing patched_crtc_timing = stream->timing; bool manual_mode; unsigned int tmds_div = PIXEL_RATE_DIV_NA; unsigned int unused_div = PIXEL_RATE_DIV_NA; @@ -844,6 +851,13 @@ enum dc_status dcn401_enable_stream_timing( odm_slice_width, last_odm_slice_width); } + /* set DTBCLK_P */ + if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) { + if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) { + dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst); + } + } + /* HW
program guide assume display already disable * by unplug sequence. OTG assume stop. */ @@ -861,9 +875,13 @@ enum dc_status dcn401_enable_stream_timing( if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal))) dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx); + /* if we are borrowing from hblank, h_addressable needs to be adjusted */ + if (dc->debug.enable_hblank_borrow) + patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow; + pipe_ctx->stream_res.tg->funcs->program_timing( pipe_ctx->stream_res.tg, - &stream->timing, + &patched_crtc_timing, pipe_ctx->pipe_dlg_param.vready_offset, pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, @@ -1004,8 +1022,6 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx) dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); } else { - /* need to set DTBCLK_P source to DPREFCLK for DP8B10B */ - dccg->funcs->set_dtbclk_p_src(dccg, DPREFCLK, tg->inst); dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); } @@ -1063,7 +1079,6 @@ static bool dcn401_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) r2 = test_pipe->plane_res.scl_data.recout; r2_r = r2.x + r2.width; r2_b = r2.y + r2.height; - split_pipe = test_pipe; /** * There is another half plane on same layer because of @@ -1097,6 +1112,58 @@ void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct } } +static void disable_link_output_symclk_on_tx_off(struct dc_link *link, enum dp_link_encoding link_encoding) +{ + struct dc *dc = link->ctx->dc; + struct pipe_ctx *pipe_ctx = NULL; + uint8_t i; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) { + pipe_ctx->clock_source->funcs->program_pix_clk( + pipe_ctx->clock_source, + &pipe_ctx->stream_res.pix_clk_params, + link_encoding, + &pipe_ctx->pll_settings); + break; + } + } +} + +void dcn401_disable_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + struct dc *dc = link->ctx->dc; + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + struct dmcu *dmcu = dc->res_pool->dmcu; + + if (signal == SIGNAL_TYPE_EDP && + link->dc->hwss.edp_backlight_control && + !link->skip_implict_edp_power_control) + link->dc->hwss.edp_backlight_control(link, false); + else if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->lock_phy(dmcu); + + if (dc_is_tmds_signal(signal) && link->phy_state.symclk_ref_cnts.otg > 0) { + disable_link_output_symclk_on_tx_off(link, DP_UNKNOWN_ENCODING); + link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; + } else { + link_hwss->disable_link_output(link, link_res, signal); + link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; + } + + if (signal == SIGNAL_TYPE_EDP && + link->dc->hwss.edp_backlight_control && + !link->skip_implict_edp_power_control) + link->dc->hwss.edp_power_control(link, false); + else if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->unlock_phy(dmcu); + + dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); +} + void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx) { struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; @@ -1426,6 +1493,10 @@ void dcn401_prepare_bandwidth(struct dc *dc, &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, false); + /* update 
timeout thresholds */ + if (hubbub->funcs->program_arbiter) { + dc->wm_optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false); + } /* decrease compbuf size */ if (hubbub->funcs->program_compbuf_segments) { @@ -1467,6 +1538,10 @@ void dcn401_optimize_bandwidth( &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, true); + /* update timeout thresholds */ + if (hubbub->funcs->program_arbiter) { + hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, true); + } if (dc->clk_mgr->dc_mode_softmax_enabled) if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && @@ -1669,7 +1744,7 @@ void dcn401_hardware_release(struct dc *dc) } } -void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master) +void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master) { struct pipe_ctx *opp_heads[MAX_PIPES]; struct pipe_ctx *dpp_pipes[MAX_PIPES]; @@ -1695,6 +1770,9 @@ void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, hubbub->funcs->wait_for_det_update) hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst); } + } else { + if (hubbub && opp_heads[slice_idx]->plane_res.hubp && hubbub->funcs->wait_for_det_update) + hubbub->funcs->wait_for_det_update(hubbub, opp_heads[slice_idx]->plane_res.hubp->inst); } } } @@ -1705,7 +1783,6 @@ void dcn401_interdependent_update_lock(struct dc *dc, unsigned int i = 0; struct pipe_ctx *pipe = NULL; struct timing_generator *tg = NULL; - bool pipe_unlocked[MAX_PIPES] = {0}; if (lock) { for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1719,48 +1796,91 @@ void dcn401_interdependent_update_lock(struct dc *dc, dc->hwss.pipe_control_lock(dc, pipe, true); } } else { - /* Unlock pipes based on the change in DET allocation instead of pipe index - * Prevents over allocation of DET during unlock process - * e.g. 
2 pipe config with different streams with a max of 20 DET segments - * Before: After: - * - Pipe0: 10 DET segments - Pipe0: 12 DET segments - * - Pipe1: 10 DET segments - Pipe1: 8 DET segments - * If Pipe0 gets updated first, 22 DET segments will be allocated - */ + /* Need to free the DET in use first and let those pipes update, then unlock the remaining pipes */ for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; tg = pipe->stream_res.tg; - int current_pipe_idx = i; if (!resource_is_pipe_type(pipe, OTG_MASTER) || !tg->funcs->is_tg_enabled(tg) || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { - pipe_unlocked[i] = true; continue; } - // If the same stream exists in old context, ensure the OTG_MASTER pipes for the same stream get compared - struct pipe_ctx *old_otg_master = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, pipe->stream); - - if (old_otg_master) - current_pipe_idx = old_otg_master->pipe_idx; - if (resource_calculate_det_for_stream(context, pipe) < - resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[current_pipe_idx])) { + if (dc->scratch.pipes_to_unlock_first[i]) { + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; dc->hwss.pipe_control_lock(dc, pipe, false); - pipe_unlocked[i] = true; - dcn401_wait_for_det_buffer_update(dc, context, pipe); + /* Assumes the pipe of the same index in current_state is also an OTG_MASTER pipe */ + dcn401_wait_for_det_buffer_update_under_otg_master(dc, dc->current_state, old_pipe); } } + /* Unlocking the rest of the pipes */ for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (pipe_unlocked[i]) + if (dc->scratch.pipes_to_unlock_first[i]) continue; + pipe = &context->res_ctx.pipe_ctx[i]; + tg = pipe->stream_res.tg; + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !tg->funcs->is_tg_enabled(tg) || + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { + continue; + } + dc->hwss.pipe_control_lock(dc, pipe, false); } } } +void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx) +{ + /* If 3DLUT FL is enabled and 3DLUT is in use, follow the workaround sequence for pipe unlock to make sure that + * HUBP will properly fetch 3DLUT contents after unlock. + * + * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless + * of whether OTG lock is currently being held or not.
+ */ + struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL }; + struct pipe_ctx *odm_pipe, *mpc_pipe; + int i, wa_pipe_ct = 0; + + for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) { + for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) { + if (mpc_pipe->plane_state && mpc_pipe->plane_state->mcm_luts.lut3d_data.lut3d_src + == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM + && mpc_pipe->plane_state->mcm_shaper_3dlut_setting + == DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT) { + wa_pipes[wa_pipe_ct++] = mpc_pipe; + } + } + } + + if (wa_pipe_ct > 0) { + if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout) + pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true); + + for (i = 0; i < wa_pipe_ct; ++i) { + if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl) + wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true); + } + + pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); + if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status) + pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false); + + for (i = 0; i < wa_pipe_ct; ++i) { + if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl) + wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true); + } + + if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout) + pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false); + } else { + pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); + } +} + void dcn401_program_outstanding_updates(struct dc *dc, struct dc_state *context) { @@ -1770,3 +1890,125 @@ void dcn401_program_outstanding_updates(struct dc *dc, if (hubbub->funcs->program_compbuf_segments) hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true); } + +void dcn401_reset_back_end_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context) +{ + struct dc_link *link = pipe_ctx->stream->link; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + + DC_LOGGER_INIT(dc->ctx->logger); + if (pipe_ctx->stream_res.stream_enc == NULL) { + pipe_ctx->stream = NULL; + return; + } + + /* DPMS may already be disabled, or the dpms_off status may be incorrect + * due to the fastboot feature. When the system resumes from S4 with only + * a second screen, dpms_off would be true but VBIOS lit up eDP, so check + * the link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) + dc->link_srv->set_dpms_off(pipe_ctx); + else if (pipe_ctx->stream_res.audio) + dc->hwss.disable_audio_stream(pipe_ctx); + + /* free acquired resources */ + if (pipe_ctx->stream_res.audio) { + /*disable az_endpoint*/ + pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); + + /*free audio*/ + if (dc->caps.dynamic_audio == true) { + /*we have to dynamically arbitrate the audio endpoints*/ + /*we free the resource, need reset is_audio_acquired*/ + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, + pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; + } + } + + /* By the upper caller loop, the parent pipe (pipe0) will be reset last. + * The back end is shared by all pipes and is disabled only when disabling the + * parent pipe.
+ */ + if (pipe_ctx->top_pipe == NULL) { + + dc->hwss.set_abm_immediate_disable(pipe_ctx); + + pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); + + pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); + if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass) + pipe_ctx->stream_res.tg->funcs->set_odm_bypass( + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); + + if (pipe_ctx->stream_res.tg->funcs->set_drr) + pipe_ctx->stream_res.tg->funcs->set_drr( + pipe_ctx->stream_res.tg, NULL); + /* TODO - convert symclk_ref_cnts for otg to a bit map to solve + * the case where the same symclk is shared across multiple otg + * instances + */ + if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) + link->phy_state.symclk_ref_cnts.otg = 0; + if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) { + link_hwss->disable_link_output(link, + &pipe_ctx->link_res, pipe_ctx->stream->signal); + link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; + } + + /* reset DTBCLK_P */ + if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) + dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst); + } + +/* + * In case of a dangling plane, setting this to NULL unconditionally + * causes failures during reset hw ctx where, if stream is NULL, + * it is expected that the pipe_ctx pointers to pipes and plane are NULL. + */ + pipe_ctx->stream = NULL; + pipe_ctx->top_pipe = NULL; + pipe_ctx->bottom_pipe = NULL; + pipe_ctx->next_odm_pipe = NULL; + pipe_ctx->prev_odm_pipe = NULL; + DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n", + pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); +} + +void dcn401_reset_hw_ctx_wrap( + struct dc *dc, + struct dc_state *context) +{ + int i; + struct dce_hwseq *hws = dc->hwseq; + + /* Reset Back End*/ + for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx_old->stream) + continue; + + if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) + continue; + + if (!pipe_ctx->stream || + pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { + struct clock_source *old_clk = pipe_ctx_old->clock_source; + + if (hws->funcs.reset_back_end_for_pipe) + hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); + if (old_clk) + old_clk->funcs->cs_power_down(old_clk); + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h index a27e62081685..28a513dfc005 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h @@ -55,6 +55,10 @@ void dcn401_populate_mcm_luts(struct dc *dc, bool lut_bank_a); void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); +void dcn401_disable_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal); + void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx); bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable); @@ -81,7 +85,16 @@ void dcn401_hardware_release(struct dc *dc); void dcn401_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy); -void 
dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); +void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock); void dcn401_program_outstanding_updates(struct dc *dc, struct dc_state *context); +void dcn401_reset_back_end_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); +void dcn401_reset_hw_ctx_wrap( + struct dc *dc, + struct dc_state *context); +void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx); + #endif /* __DC_HWSS_DCN401_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c index a2ca07235c83..23e4f208152e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c @@ -77,14 +77,14 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .calc_vupdate_position = dcn10_calc_vupdate_position, .apply_idle_power_optimizations = dcn401_apply_idle_power_optimizations, .does_plane_fit_in_mall = NULL, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .hardware_release = dcn401_hardware_release, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, .enable_tmds_link_output = dce110_enable_tmds_link_output, .enable_dp_link_output = dce110_enable_dp_link_output, - .disable_link_output = dcn32_disable_link_output, + .disable_link_output = dcn401_disable_link_output, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, .enable_phantom_streams = dcn32_enable_phantom_streams, @@ -93,13 +93,13 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .update_phantom_vp_position = dcn32_update_phantom_vp_position, .update_dsc_pg = dcn32_update_dsc_pg, .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, - .blank_phantom = dcn32_blank_phantom, .wait_for_dcc_meta_propagation = dcn401_wait_for_dcc_meta_propagation, .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless, .fams2_global_control_lock = dcn401_fams2_global_control_lock, .fams2_update_config = dcn401_fams2_update_config, .fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast, .program_outstanding_updates = dcn401_program_outstanding_updates, + .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, }; static const struct hwseq_private_funcs dcn401_private_funcs = { @@ -111,7 +111,7 @@ static const struct hwseq_private_funcs dcn401_private_funcs = { .power_down = dce110_power_down, .enable_display_power_gating = dcn10_dummy_display_power_gating, .blank_pixel_data = dcn20_blank_pixel_data, - .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap, .enable_stream_timing = dcn401_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, @@ -136,8 +136,9 @@ static const struct hwseq_private_funcs dcn401_private_funcs = { .update_mall_sel = dcn32_update_mall_sel, .calculate_dccg_k1_k2_values = NULL, .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, - .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe, + 
.reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe, .populate_mcm_luts = NULL, + .perform_3dlut_wa_unlock = dcn401_perform_3dlut_wa_unlock, }; void dcn401_hw_sequencer_init_functions(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index ac9205625623..66fdc5805d0a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -365,8 +365,7 @@ struct hw_sequencer_funcs { void (*clear_status_bits)(struct dc *dc, unsigned int mask); bool (*set_backlight_level)(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *params); void (*set_abm_immediate_disable)(struct pipe_ctx *pipe_ctx); @@ -462,6 +461,7 @@ struct hw_sequencer_funcs { void (*program_outstanding_updates)(struct dc *dc, struct dc_state *context); void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable); + void (*wait_for_all_pending_updates)(const struct pipe_ctx *pipe_ctx); }; void color_space_to_black_color( @@ -504,6 +504,10 @@ void get_mclk_switch_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); +void get_cursor_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color); + void set_p_state_switch_method( struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 0ac675456979..22a5d4a03c98 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -182,6 +182,7 @@ struct hwseq_private_funcs { struct pipe_ctx *pipe_ctx, struct dc_cm2_func_luts mcm_luts, bool lut_bank_a); + void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index fa5edd03d004..b5afd8c3103d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -60,5 +60,7 @@ enum dc_status { }; char *dc_status_to_str(enum dc_status status); +char *dc_pixel_encoding_to_str(enum dc_pixel_encoding pixel_encoding); +char *dc_color_depth_to_str(enum dc_color_depth color_depth); #endif /* _CORE_STATUS_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index bfb8b8502d20..2edd5b38ce4f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -215,6 +215,11 @@ struct resource_funcs { void (*get_panel_config_defaults)(struct dc_panel_config *panel_config); void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx); + /* + * Get indicator of power from a context that went through full validation + */ + int (*get_power_profile)(const struct dc_state *context); + unsigned int (*get_det_buffer_size)(const struct dc_state *context); }; struct audio_support{ @@ -473,6 +478,8 @@ struct pipe_ctx { /* subvp_index: only valid if the pipe is a SUBVP_MAIN*/ uint8_t subvp_index; struct pixel_rate_divider pixel_rate_divider; + /* pixels borrowed from hblank to hactive */ + uint8_t hblank_borrow; }; /* Data used for dynamic link encoder assignment. 
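The new pipe_ctx->hblank_borrow field shifts pixels from horizontal blanking into the active region, and every consumer of the effective width has to agree on it: the DSC pic_width computations in dcn32_update_dsc_on_stream() and link_set_dsc_on_stream() add it, and dcn401_enable_stream_timing() widens h_addressable in its patched timing copy. The shared arithmetic, extracted as a sketch (the helper name is illustrative):

/*
 * Illustrative helper showing the width rule used by the DSC and OTG
 * hunks in this series: borrowed hblank pixels count as active pixels.
 */
static uint32_t effective_pic_width(const struct dc_crtc_timing *timing,
				    uint8_t hblank_borrow, int opp_cnt)
{
	return (timing->h_addressable + hblank_borrow +
		timing->h_border_left + timing->h_border_right) / opp_cnt;
}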
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 67c32401893e..52b745667ef7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -228,6 +228,7 @@ struct hubbub_funcs { void (*program_det_segments)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg); void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase); void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst); + bool (*program_arbiter)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower); }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 3d4c8bd42b49..b74e18cc1e66 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -342,7 +342,11 @@ struct timing_generator_funcs { void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg); void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params); void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg); - bool (*get_double_buffer_pending)(struct timing_generator *tg); + bool (*get_optc_double_buffer_pending)(struct timing_generator *tg); + bool (*get_otg_double_buffer_pending)(struct timing_generator *tg); + bool (*get_pipe_update_pending)(struct timing_generator *tg); + void (*set_vupdate_keepout)(struct timing_generator *tg, bool enable); + bool (*wait_update_lock_status)(struct timing_generator *tg, bool locked); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index 72a8479e1f2d..f04292086c08 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -248,8 +248,7 @@ struct link_service { uint32_t *backlight_millinits_avg, uint32_t *backlight_millinits_peak); bool (*edp_set_backlight_level)(const struct dc_link *link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *backlight_level_params); bool (*edp_set_backlight_level_nits)(struct dc_link *link, bool isHDR, uint32_t backlight_millinits, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index d21ee9d12d26..e026c728042a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -48,6 +48,9 @@ #include "dm_helpers.h" #include "clk_mgr.h" + // Offset DPCD 050Eh == 0x5A +#define MST_HUB_ID_0x5A 0x5A + #define DC_LOGGER \ link->ctx->logger #define DC_LOGGER_INIT(logger) @@ -692,6 +695,15 @@ static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link) link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) link->wa_flags.dpia_mst_dsc_always_on = true; + + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->type == dc_connection_mst_branch && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.branch_vendor_specific_data[2] == MST_HUB_ID_0x5A && + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && + !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) { + link->wa_flags.dpia_mst_dsc_always_on = true; + } } static void 
revert_dpia_mst_dsc_always_on_wa(struct dc_link *link) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index c4e03482ba9a..5d66bfc7fe6e 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -808,7 +808,8 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) enum optc_dsc_mode optc_dsc_mode; /* Enable DSC hw block */ - dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; + dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow + + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; @@ -2082,6 +2083,9 @@ static enum dc_status enable_link_dp(struct dc_state *state, if (link_settings->link_rate == LINK_RATE_LOW) skip_video_pattern = false; + if (stream->sink_patches.oled_optimize_display_on) + set_default_brightness_aux(link); + if (perform_link_training_with_retries(link_settings, skip_video_pattern, lt_attempts, @@ -2105,10 +2109,14 @@ static enum dc_status enable_link_dp(struct dc_state *state, if (link->dpcd_sink_ext_caps.bits.oled == 1 || link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { - set_default_brightness_aux(link); - if (link->dpcd_sink_ext_caps.bits.oled == 1) - msleep(bl_oled_enable_delay); - edp_backlight_enable_aux(link, true); + if (!stream->sink_patches.oled_optimize_display_on) { + set_default_brightness_aux(link); + if (link->dpcd_sink_ext_caps.bits.oled == 1) + msleep(bl_oled_enable_delay); + edp_backlight_enable_aux(link, true); + } else { + edp_backlight_enable_aux(link, true); + } } return status; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index d78c8ec4de79..9dabaf682171 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -51,9 +51,10 @@ #include "dc_dmub_srv.h" #include "gpio_service_interface.h" +#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ + #define DC_LOGGER \ link->ctx->logger -#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ #ifndef MAX #define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) @@ -1207,6 +1208,13 @@ static void get_active_converter_info( dp_hw_fw_revision.ieee_fw_rev, sizeof(dp_hw_fw_revision.ieee_fw_rev)); } + + core_link_read_dpcd( + link, + DP_BRANCH_VENDOR_SPECIFIC_START, + (uint8_t *)link->dpcd_caps.branch_vendor_specific_data, + sizeof(link->dpcd_caps.branch_vendor_specific_data)); + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { union dp_dfp_cap_ext dfp_cap_ext; @@ -1625,7 +1633,11 @@ static bool retrieve_link_cap(struct dc_link *link) } /* Read DP tunneling information. 
*/ - status = dpcd_get_tunneling_device_data(link); + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + status = dpcd_get_tunneling_device_data(link); + if (status != DC_OK) + dm_error("%s: Read tunneling device data failed.\n", __func__); + } dpcd_set_source_specific_data(link); /* Sink may need to configure internals based on vendor, so allow some @@ -1842,6 +1854,9 @@ static bool retrieve_link_cap(struct dc_link *link) DP_FEC_CAPABILITY, &link->dpcd_caps.fec_cap.raw, sizeof(link->dpcd_caps.fec_cap.raw)); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_read_dpcd (DP_FEC_CAPABILITY) failed\n", __func__, __LINE__); + status = core_link_read_dpcd( link, DP_DSC_SUPPORT, @@ -1864,6 +1879,9 @@ static bool retrieve_link_cap(struct dc_link *link) DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_read_dpcd (DP_DSC_BRANCH_OVERALL_THROUGHPUT_0) failed\n", __func__, __LINE__); + DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index); DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x", link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0); @@ -2055,6 +2073,14 @@ void detect_edp_sink_caps(struct dc_link *link) core_link_read_dpcd(link, DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE, &link->dpcd_caps.pr_info.max_deviation_line, sizeof(link->dpcd_caps.pr_info.max_deviation_line)); + + /* + * OLED Emission Rate info + */ + if (link->dpcd_sink_ext_caps.bits.emission_output) + core_link_read_dpcd(link, DP_SINK_EMISSION_RATE, + (uint8_t *)&link->dpcd_caps.edp_oled_emission_rate, + sizeof(link->dpcd_caps.edp_oled_emission_rate)); } bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index 6af42ba9885c..0d123e647652 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -59,12 +59,18 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) dpcd_dp_tun_data, sizeof(dpcd_dp_tun_data)); + if (status != DC_OK) + goto err; + status = core_link_read_dpcd( link, DP_USB4_ROUTER_TOPOLOGY_ID, dpcd_topology_data, sizeof(dpcd_topology_data)); + if (status != DC_OK) + goto err; + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT]; link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = @@ -75,6 +81,7 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++) link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i]; +err: return status; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index 96bf135b6f05..48abeaa88678 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -221,21 +221,11 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link) &replay_error_status.raw, sizeof(replay_error_status.raw)); - link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR = - replay_error_status.bits.LINK_CRC_ERROR; - 
link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR = - replay_configuration.bits.DESYNC_ERROR_STATUS; - link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR = - replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS; - - if (link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR || - link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR || - link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR) { + if (replay_error_status.bits.LINK_CRC_ERROR || + replay_configuration.bits.DESYNC_ERROR_STATUS || + replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS) { bool allow_active; - if (link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR) - link->replay_settings.config.received_desync_error_hpd = 1; - if (link->replay_settings.config.force_disable_desync_error_check) return; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 27b881f947e8..754c895e1bfb 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -272,7 +272,7 @@ void dp_wait_for_training_aux_rd_interval( struct dc_link *link, uint32_t wait_in_micro_secs) { - fsleep(wait_in_micro_secs); + usleep_range_state(wait_in_micro_secs, wait_in_micro_secs, TASK_UNINTERRUPTIBLE); DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", __func__, @@ -1107,9 +1107,13 @@ enum dc_status dpcd_set_link_settings( status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, &downspread.raw, sizeof(downspread)); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_DOWNSPREAD_CTRL) failed\n", __func__, __LINE__); status = core_link_write_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, 1); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LANE_COUNT_SET) failed\n", __func__, __LINE__); if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && lt_settings->link_settings.use_link_rate_set == true) { @@ -1125,12 +1129,19 @@ enum dc_status dpcd_set_link_settings( supported_link_rates, sizeof(supported_link_rates)); } status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_BW_SET) failed\n", __func__, __LINE__); + status = core_link_write_dpcd(link, DP_LINK_RATE_SET, <_settings->link_settings.link_rate_set, 1); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_RATE_SET) failed\n", __func__, __LINE__); } else { rate = get_dpcd_link_rate(<_settings->link_settings); status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_BW_SET) failed\n", __func__, __LINE__); } if (rate) { diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c index b5cf75975fff..ccf8096dde29 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -412,7 +412,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( /* 5. 
check CR done*/ if (dp_is_cr_done(lane_count, dpcd_lane_status)) { - status = LINK_TRAINING_SUCCESS; break; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 3aa05a2be6c0..e0e3bb865359 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -157,31 +157,13 @@ bool edp_set_backlight_level_nits(struct dc_link *link, uint32_t backlight_millinits, uint32_t transition_time_in_ms) { - struct dpcd_source_backlight_set dpcd_backlight_set; - uint8_t backlight_control = isHDR ? 1 : 0; - if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; - // OLEDs have no PWM, they can only use AUX - if (link->dpcd_sink_ext_caps.bits.oled == 1) - backlight_control = 1; - - *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; - *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; - - - if (!link->dpcd_caps.panel_luminance_control) { - if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, - (uint8_t *)(&dpcd_backlight_set), - sizeof(dpcd_backlight_set)) != DC_OK) - return false; - - if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, - &backlight_control, 1) != DC_OK) - return false; - } else { + // use internal backlight control if dmub capabilities are not present + if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX && + !link->dc->caps.dmub_caps.aux_backlight_support) { uint8_t backlight_enable = 0; struct target_luminance_value *target_luminance = NULL; @@ -205,6 +187,24 @@ bool edp_set_backlight_level_nits(struct dc_link *link, (uint8_t *)(target_luminance), sizeof(struct target_luminance_value)) != DC_OK) return false; + } else if (link->backlight_control_type == BACKLIGHT_CONTROL_AMD_AUX) { + struct dpcd_source_backlight_set dpcd_backlight_set; + *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; + *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; + + uint8_t backlight_control = isHDR ? 
1 : 0; + // OLEDs have no PWM, they can only use AUX + if (link->dpcd_sink_ext_caps.bits.oled == 1) + backlight_control = 1; + + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + (uint8_t *)(&dpcd_backlight_set), + sizeof(dpcd_backlight_set)) != DC_OK) + return false; + + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + &backlight_control, 1) != DC_OK) + return false; } return true; @@ -519,11 +519,11 @@ static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link) } bool edp_set_backlight_level(const struct dc_link *link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp) + struct set_backlight_level_params *backlight_level_params) { struct dc *dc = link->ctx->dc; - + uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16; + uint32_t frame_ramp = backlight_level_params->frame_ramp; DC_LOGGER_INIT(link->ctx->logger); DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", backlight_pwm_u16_16, backlight_pwm_u16_16); @@ -544,10 +544,11 @@ bool edp_set_backlight_level(const struct dc_link *link, return false; } + backlight_level_params->frame_ramp = frame_ramp; + dc->hwss.set_backlight_level( pipe_ctx, - backlight_pwm_u16_16, - frame_ramp); + backlight_level_params); } return true; } @@ -940,8 +941,7 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream struct replay_context replay_context = { 0 }; unsigned int lineTimeInNs = 0; - - union replay_enable_and_configuration replay_config; + union replay_enable_and_configuration replay_config = { 0 }; union dpcd_alpm_configuration alpm_config; @@ -1168,9 +1168,6 @@ static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link, link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A; if (link_res->hpo_dp_link_enc) { - if (link->wa_flags.disable_assr_for_uhbr) - return; - link_enc_index = link_res->hpo_dp_link_enc->inst; use_hpo_dp_link_enc = true; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index 30dc8c24c008..bcfa6ac5d4e7 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -36,8 +36,7 @@ bool edp_get_backlight_level_nits(struct dc_link *link, uint32_t *backlight_millinits_avg, uint32_t *backlight_millinits_peak); bool edp_set_backlight_level(const struct dc_link *link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *backlight_level_params); bool edp_set_backlight_level_nits(struct dc_link *link, bool isHDR, uint32_t backlight_millinits, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index b7a57f98553d..40757f20d73f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -202,6 +202,7 @@ struct dcn_optc_registers { uint32_t OPTC_CLOCK_CONTROL; uint32_t OPTC_WIDTH_CONTROL2; uint32_t OTG_PSTATE_REGISTER; + uint32_t OTG_PIPE_UPDATE_STATUS; }; #define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\ @@ -566,6 +567,12 @@ struct dcn_optc_registers { type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;\ type OPTC_DOUBLE_BUFFER_PENDING;\ +#define TG_REG_FIELD_LIST_DCN2_0(type) \ + type OTG_FLIP_PENDING;\ + type OTG_DC_REG_UPDATE_PENDING;\ + type OTG_CURSOR_UPDATE_PENDING;\ + type OTG_VUPDATE_KEEPOUT_STATUS;\ + #define 
TG_REG_FIELD_LIST_DCN3_2(type) \ type OTG_H_TIMING_DIV_MODE_MANUAL; @@ -600,6 +607,7 @@ struct dcn_optc_registers { struct dcn_optc_shift { TG_REG_FIELD_LIST(uint8_t) + TG_REG_FIELD_LIST_DCN2_0(uint8_t) TG_REG_FIELD_LIST_DCN3_2(uint8_t) TG_REG_FIELD_LIST_DCN3_5(uint8_t) TG_REG_FIELD_LIST_DCN401(uint8_t) @@ -607,6 +615,7 @@ struct dcn_optc_shift { struct dcn_optc_mask { TG_REG_FIELD_LIST(uint32_t) + TG_REG_FIELD_LIST_DCN2_0(uint32_t) TG_REG_FIELD_LIST_DCN3_2(uint32_t) TG_REG_FIELD_LIST_DCN3_5(uint32_t) TG_REG_FIELD_LIST_DCN401(uint32_t) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h index 364034b19028..928e110b95fb 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h @@ -43,7 +43,8 @@ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ SR(DWB_SOURCE_SELECT),\ SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst), \ - SRI(OTG_DRR_CONTROL, OTG, inst) + SRI(OTG_DRR_CONTROL, OTG, inst),\ + SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst) #define TG_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\ TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\ @@ -53,6 +54,10 @@ SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ SF(OTG0_OTG_GLOBAL_CONTROL2, DIG_UPDATE_LOCATION, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c index abcd03d78668..4c95c0958612 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c @@ -271,6 +271,48 @@ void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c optc1->opp_count = opp_cnt; } +/* OTG status register that indicates OPTC update is pending */ +bool optc3_get_optc_double_buffer_pending(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t update_pending = 0; + + REG_GET(OPTC_INPUT_GLOBAL_CONTROL, + OPTC_DOUBLE_BUFFER_PENDING, + &update_pending); + + return (update_pending == 1); +} + +/* OTG status register that indicates OTG update is pending */ +bool optc3_get_otg_update_pending(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t update_pending = 0; + + REG_GET(OTG_DOUBLE_BUFFER_CONTROL, + OTG_UPDATE_PENDING, + &update_pending); + + return (update_pending == 1); +} + +/* OTG status register that indicates surface update is pending */ +bool optc3_get_pipe_update_pending(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t flip_pending = 0; + uint32_t dc_update_pending = 0; + + REG_GET_2(OTG_PIPE_UPDATE_STATUS, + OTG_FLIP_PENDING, + &flip_pending, + OTG_DC_REG_UPDATE_PENDING, + &dc_update_pending); + + return (flip_pending == 1 || dc_update_pending == 1); +} + /** * optc3_set_timing_double_buffer() - DRR double buffering control * @@ -375,6 +417,9 @@ static struct timing_generator_funcs dcn30_tg_funcs = { .get_hw_timing = optc1_get_hw_timing, 
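/*
 * For context: the three callbacks added just below split the single
 * get_double_buffer_pending() hook (dropped from the dcn32 funcs later
 * in this diff) into separate OPTC, OTG and pipe-level status queries.
 * A hypothetical caller that previously polled one flag could combine
 * them like so:
 *
 *	bool pending = tg->funcs->get_optc_double_buffer_pending(tg) ||
 *		       tg->funcs->get_otg_double_buffer_pending(tg) ||
 *		       tg->funcs->get_pipe_update_pending(tg);
 *
 * The combined poll is an illustration only, not code from this diff.
 */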
.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, + .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending, + .get_otg_double_buffer_pending = optc3_get_otg_update_pending, + .get_pipe_update_pending = optc3_get_pipe_update_pending, }; void dcn30_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h index bda974d432ea..e2303f9eaf13 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h @@ -109,7 +109,8 @@ SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ - SR(DWB_SOURCE_SELECT) + SR(DWB_SOURCE_SELECT),\ + SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst) #define DCN30_VTOTAL_REGS_SF(mask_sh) @@ -209,6 +210,7 @@ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\ + SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_DOUBLE_BUFFER_PENDING, mask_sh),\ SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\ SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\ SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\ @@ -319,7 +321,11 @@ SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh) + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ void dcn30_timing_generator_init(struct optc *optc1); @@ -356,4 +362,7 @@ void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc); void optc3_tg_init(struct timing_generator *optc); void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max); +bool optc3_get_optc_double_buffer_pending(struct timing_generator *optc); +bool optc3_get_otg_update_pending(struct timing_generator *optc); +bool optc3_get_pipe_update_pending(struct timing_generator *optc); #endif /* __DC_OPTC_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c index 1a22ae89fb55..d7a45ef2d01b 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c @@ -169,6 +169,9 @@ static struct timing_generator_funcs dcn30_tg_funcs = { .get_hw_timing = optc1_get_hw_timing, .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, + .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending, + .get_otg_double_buffer_pending = optc3_get_otg_update_pending, + .get_pipe_update_pending = optc3_get_pipe_update_pending, }; void dcn301_timing_generator_init(struct optc *optc1) diff --git 
a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h index 30b81a448ce2..fbbe86d00c2e 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h @@ -99,7 +99,8 @@ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ SRI(OTG_CRC_CNTL2, OTG, inst),\ SR(DWB_SOURCE_SELECT),\ - SRI(OTG_DRR_CONTROL, OTG, inst) + SRI(OTG_DRR_CONTROL, OTG, inst),\ + SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst) #define OPTC_COMMON_MASK_SH_LIST_DCN3_1(mask_sh)\ SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ @@ -254,7 +255,11 @@ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ void dcn31_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h index 99c098e76116..0ff72b97b465 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h @@ -98,7 +98,8 @@ SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ - SRI(OTG_DRR_CONTROL, OTG, inst) + SRI(OTG_DRR_CONTROL, OTG, inst),\ + SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst) #define OPTC_COMMON_MASK_SH_LIST_DCN3_14(mask_sh)\ SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ @@ -248,7 +249,11 @@ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ void dcn314_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c index 00094f0e8470..c217f653b3c8 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -297,18 +297,6 @@ static void optc32_set_drr( optc32_setup_manual_trigger(optc); } -bool optc32_get_double_buffer_pending(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t update_pending = 0; - - REG_GET(OPTC_INPUT_GLOBAL_CONTROL, - OPTC_DOUBLE_BUFFER_PENDING, - &update_pending); - - return (update_pending == 1); -} - static struct timing_generator_funcs dcn32_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, @@ -373,7 +361,9 @@ static struct timing_generator_funcs dcn32_tg_funcs = { .setup_manual_trigger = optc2_setup_manual_trigger, .get_hw_timing 
= optc1_get_hw_timing, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, - .get_double_buffer_pending = optc32_get_double_buffer_pending, + .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending, + .get_otg_double_buffer_pending = optc3_get_otg_update_pending, + .get_pipe_update_pending = optc3_get_pipe_update_pending, }; void dcn32_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h index 665d7c52f67c..0b0964a9da74 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h @@ -177,7 +177,11 @@ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh) void dcn32_timing_generator_init(struct optc *optc1); void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode); @@ -185,6 +189,5 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi void optc32_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg); -bool optc32_get_double_buffer_pending(struct timing_generator *optc); #endif /* __DC_OPTC_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h index d077e2392379..be749ab41dce 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h @@ -67,7 +67,11 @@ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_END_READBACK, mask_sh),\ SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh),\ SF(OTG0_OTG_V_COUNT_STOP_CONTROL, OTG_V_COUNT_STOP, mask_sh),\ - SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh) + SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh) void dcn35_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c index a5d6a7dca554..783ca9acc762 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c @@ -430,6 +430,35 @@ static void optc401_program_global_sync( REG_UPDATE(OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, pstate_keepout); } +static void optc401_set_vupdate_keepout(struct timing_generator *tg, bool enable) +{ + struct optc *optc1 = DCN10TG_FROM_TG(tg); + + REG_SET_3(OTG_VUPDATE_KEEPOUT, 0, + MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0, + 
MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, optc1->vready_offset + 10, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, enable); + + return; +} + +static bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked) +{ + struct optc *optc1 = DCN10TG_FROM_TG(tg); + uint32_t lock_status = 0; + + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, locked, + 1, 150000); + + REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &lock_status); + + if (lock_status != locked) + return false; + + return true; +} + static struct timing_generator_funcs dcn401_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, @@ -493,7 +522,11 @@ static struct timing_generator_funcs dcn401_tg_funcs = { .setup_manual_trigger = optc2_setup_manual_trigger, .get_hw_timing = optc1_get_hw_timing, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, - .get_double_buffer_pending = optc32_get_double_buffer_pending, + .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending, + .get_otg_double_buffer_pending = optc3_get_otg_update_pending, + .get_pipe_update_pending = optc3_get_pipe_update_pending, + .set_vupdate_keepout = optc401_set_vupdate_keepout, + .wait_update_lock_status = optc401_wait_update_lock_status, }; void dcn401_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h index bb13a645802d..1be89571986f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h @@ -159,7 +159,11 @@ SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, mask_sh),\ SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_EXTEND, mask_sh),\ SF(OTG0_OTG_PSTATE_REGISTER, OTG_UNBLANK, mask_sh),\ - SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_ALLOW_WIDTH_MIN, mask_sh) + SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_ALLOW_WIDTH_MIN, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh) void dcn401_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c index 53a5f4cb648c..e698543ec937 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c @@ -623,7 +623,7 @@ static struct link_encoder *dce100_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c index 91da5cf85b69..035c6cfdaee5 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c @@ -668,7 +668,7 @@ static struct link_encoder *dce110_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git 
a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index 162856c523e4..480a50967385 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -629,7 +629,7 @@ static struct link_encoder *dce112_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c index 621825a51f46..c63c59623433 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c @@ -706,7 +706,7 @@ static struct link_encoder *dce120_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index a73d3c6ef425..3d5113f010bb 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -723,7 +723,7 @@ static struct link_encoder *dce80_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index 563c5eec83ff..770a380cc03d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -533,7 +533,6 @@ static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = true, .disable_dmcu = false, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, /* raven smu doesn't allow 0 disp clk, @@ -560,18 +559,6 @@ static const struct dc_debug_options debug_defaults_drv = { .using_dml2 = false, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = false, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_stutter = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_legacy_fast_update = true, -}; - static void dcn10_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN10_DPP(*dpp)); @@ -751,7 +738,7 @@ static struct link_encoder *dcn10_link_encoder_create( kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc10) + if (!enc10 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = @@ -1400,8 +1387,6 @@ static bool dcn10_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else - dc->debug = debug_defaults_diags; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index
eea2b3b307cd..7a5b9aa5292c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -706,7 +706,6 @@ static const struct resource_caps res_cap_nv14 = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = false, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, @@ -920,7 +919,7 @@ struct link_encoder *dcn20_link_encoder_create( kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = @@ -1511,6 +1510,7 @@ bool dcn20_split_stream_for_odm( if (prev_odm_pipe->plane_state) { struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data; + struct output_pixel_processor *opp = next_odm_pipe->stream_res.opp; int new_width; /* HACTIVE halved for odm combine */ @@ -1544,7 +1544,28 @@ bool dcn20_split_stream_for_odm( sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int( sd->ratios.horz_c, sd->h_active - sd->recout.x)); sd->recout.x = 0; + + /* + * When odm is used in YCbCr422 or 420 colour space, a split screen + * will be seen with the previous calculations since the extra left + * edge pixel is accounted for in fmt but not in viewport. + * + * Below are calculations which fix the split by adjusting the + * calculations when there is an extra left edge pixel. + */ + if (opp && opp->funcs->opp_get_left_edge_extra_pixel_count + && opp->funcs->opp_get_left_edge_extra_pixel_count( + opp, next_odm_pipe->stream->timing.pixel_encoding, + resource_is_pipe_type(next_odm_pipe, OTG_MASTER)) == 1) { + sd->h_active += 1; + sd->recout.width += 1; + sd->viewport.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1)); + sd->viewport_c.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1)); + sd->viewport_c.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1)); + sd->viewport.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1)); + } } + if (!next_odm_pipe->top_pipe) next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; else @@ -2133,6 +2154,7 @@ bool dcn20_fast_validate_bw( ASSERT(0); } } + /* Actual dsc count per stream dsc validation */ if (!dcn20_validate_dsc(dc, context)) { context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index fc54483b9104..d3d67d366523 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -600,7 +600,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -797,7 +796,7 @@ static struct link_encoder *dcn201_link_encoder_create( kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC); struct dcn10_link_encoder *enc10; - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; enc10 = &enc20->enc10; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 347e6aaea582..021ba8ac5c8c 100644 ---
a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -610,7 +610,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = false, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .min_disp_clk_khz = 100000, @@ -1298,7 +1297,7 @@ static struct link_encoder *dcn21_link_encoder_create( kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc21) + if (!enc21 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index 5040a4c6ed18..bfd0eccbed28 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -711,7 +711,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, //No DMCU on DCN30 .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -927,7 +926,7 @@ static struct link_encoder *dcn30_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn30_link_encoder_construct(enc20, @@ -2354,6 +2353,7 @@ static bool dcn30_resource_construct( dc->caps.dp_hdmi21_pcon_support = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; /* read VBIOS LTTPR caps */ { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index 7d04739c3ba1..a9816affd312 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -682,7 +682,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_dpp_power_gate = false, .disable_hubp_power_gate = false, @@ -883,7 +882,7 @@ static struct link_encoder *dcn301_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn301_link_encoder_construct(enc20, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c index 5791b5cc2875..7baefc910a3d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -81,7 +81,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -893,7 +892,7 @@ static struct link_encoder *dcn302_link_encoder_create( { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || 
enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, @@ -1234,6 +1233,7 @@ static bool dcn302_resource_construct( dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index 63f0f882c861..8a57d46ad15f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -82,7 +82,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -839,7 +838,7 @@ static struct link_encoder *dcn303_link_encoder_create( { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, @@ -1179,6 +1178,7 @@ static bool dcn303_resource_construct( dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index ac8cb20e2e3b..54ec3d8e920c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -858,7 +858,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -869,7 +868,7 @@ static const struct dc_debug_options debug_defaults_drv = { .max_downscale_src_width = 4096,/*upto true 4K*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, - .sanity_checks = true, + .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, @@ -1093,7 +1092,7 @@ static struct link_encoder *dcn31_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn31_link_encoder_construct(enc20, @@ -1721,6 +1720,12 @@ int dcn31_populate_dml_pipes_from_context( return pipe_cnt; } +unsigned int dcn31_get_det_buffer_size( + const struct dc_state *context) +{ + return context->bw_ctx.dml.ip.det_buffer_size_kbytes; +} + void dcn31_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -1843,6 +1848,7 @@ static struct resource_funcs dcn31_res_pool_funcs = { .update_bw_bounding_box = dcn31_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn31_get_panel_config_defaults, + .get_det_buffer_size = dcn31_get_det_buffer_size, }; static 
struct clock_source *dcn30_clock_source_create( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h index 901436591ed4..551ad912f7be 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h @@ -63,6 +63,9 @@ struct resource_pool *dcn31_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc); +unsigned int dcn31_get_det_buffer_size( + const struct dc_state *context); + /*temp: B0 specific before switch to dcn313 headers*/ #ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL #define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index 169924d0a839..2794473f2aff 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -876,7 +876,6 @@ static const struct dc_debug_options debug_defaults_drv = { .replay_skip_crtc_disabled = true, .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_dpp_power_gate = false, .disable_hubp_power_gate = false, @@ -889,7 +888,7 @@ static const struct dc_debug_options debug_defaults_drv = { .max_downscale_src_width = 4096,/*upto true 4k*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, - .sanity_checks = true, + .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, @@ -1149,7 +1148,7 @@ static struct link_encoder *dcn31_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn31_link_encoder_construct(enc20, @@ -1778,6 +1777,7 @@ static struct resource_funcs dcn314_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn314_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia, + .get_det_buffer_size = dcn31_get_det_buffer_size, }; static struct clock_source *dcn30_clock_source_create( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index 3f4b9dba4112..4ee33eb3381d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -858,7 +858,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = true, /*hw not support it*/ .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -1091,7 +1090,7 @@ static struct link_encoder *dcn31_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn31_link_encoder_construct(enc20, @@ -1812,6 +1811,11 @@ static void dcn315_get_panel_config_defaults(struct dc_panel_config *panel_confi *panel_config = panel_config_defaults; } +static int dcn315_get_power_profile(const struct dc_state *context) +{ + return !context->bw_ctx.bw.dcn.clk.p_state_change_support; +} + 
static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; @@ -1840,6 +1844,8 @@ static struct resource_funcs dcn315_res_pool_funcs = { .update_bw_bounding_box = dcn315_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn315_get_panel_config_defaults, + .get_power_profile = dcn315_get_power_profile, + .get_det_buffer_size = dcn31_get_det_buffer_size, }; static bool dcn315_resource_construct( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index 5fd52c5fcee4..79eddbafe3c2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -853,7 +853,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = true, /*hw not support it*/ .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -1085,7 +1084,7 @@ static struct link_encoder *dcn31_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn31_link_encoder_construct(enc20, @@ -1720,6 +1719,7 @@ static struct resource_funcs dcn316_res_pool_funcs = { .update_bw_bounding_box = dcn316_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn316_get_panel_config_defaults, + .get_det_buffer_size = dcn31_get_det_buffer_size, }; static bool dcn316_resource_construct( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index a124ad9bd108..12d247a7ec45 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -689,7 +689,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, // Due to CRB, no need to MPC split anymore @@ -1039,7 +1038,7 @@ static struct link_encoder *dcn32_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT @@ -1990,6 +1989,10 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned return 0; } + if (dc->caps.max_cab_allocation_bytes == 0) { + return 0xffffffff; + } + /* add 2 lines for worst case alignment */ cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2; @@ -2186,6 +2189,7 @@ static bool dcn32_resource_construct( dc->caps.dmcub_support = true; dc->caps.seamless_odm = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; @@ -2800,6 +2804,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head( free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx]; free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx]; free_pipe->plane_res.mpcc_inst = 
pool->dpps[free_pipe_idx]->inst; + free_pipe->hblank_borrow = otg_master->hblank_borrow; if (free_pipe->stream->timing.flags.DSC == 1) { dcn20_acquire_dsc(free_pipe->stream->ctx->dc, &new_ctx->res_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index 7901792afb7b..86c6e5e8c42e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -1054,7 +1054,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned SRI_ARR(OPTC_BYTES_PER_PIXEL, ODM, inst), \ SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \ SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \ - SRI_ARR(OTG_DRR_CONTROL, OTG, inst) + SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \ + SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst) /* HUBP */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 827a94f84f10..06b9479c8bd3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -686,7 +686,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -1035,7 +1034,7 @@ static struct link_encoder *dcn321_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT @@ -1743,6 +1742,7 @@ static bool dcn321_resource_construct( dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 893a9d9ee870..89e2adcf2a28 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -712,7 +712,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -1074,7 +1073,7 @@ static struct link_encoder *dcn35_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT @@ -1779,6 +1778,7 @@ static struct resource_funcs dcn35_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia, + .get_det_buffer_size = dcn31_get_det_buffer_size, }; static bool dcn35_resource_construct( @@ -1850,6 +1850,7 @@ static bool dcn35_resource_construct( dc->caps.zstate_support = true; dc->caps.ips_support = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; /* Color 
pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 70abd32ce2ad..263a37c1cd3a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -692,7 +692,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -1054,7 +1053,7 @@ static struct link_encoder *dcn35_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT @@ -1758,6 +1757,7 @@ static struct resource_funcs dcn351_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia, + .get_det_buffer_size = dcn31_get_det_buffer_size, }; static bool dcn351_resource_construct( @@ -1829,6 +1829,7 @@ static bool dcn351_resource_construct( dc->caps.zstate_support = true; dc->caps.ips_support = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index 9d56fbdcd06a..2a3dabfe3cea 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -685,7 +685,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -1032,7 +1031,7 @@ static struct link_encoder *dcn401_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT @@ -1579,7 +1578,8 @@ static void dcn401_destroy_resource_pool(struct resource_pool **pool) } static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap, + .get_subvp_en = dcn32_subvp_in_use, }; static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -1688,6 +1688,45 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx) } } +static int dcn401_get_power_profile(const struct dc_state *context) +{ + int uclk_mhz = context->bw_ctx.bw.dcn.clk.dramclk_khz / 1000; + int dpm_level = 0; + + for (int i = 0; i < context->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) { + if (context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz == 0 || + uclk_mhz < context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz) + break; + if (uclk_mhz > context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz) + dpm_level++; + } + + return dpm_level; +} + +static 
unsigned int dcn401_calc_num_avail_chans_for_mall(struct dc *dc, unsigned int num_chans) +{ + unsigned int num_available_chans = 1; + + /* channels for MALL must be a power of 2 */ + while (num_chans > 1) { + num_available_chans = (num_available_chans << 1); + num_chans = (num_chans >> 1); + } + + /* cannot be odd */ + num_available_chans &= ~1; + + /* clamp to max available channels for MALL per ASIC */ + if (ASICREV_IS_GC_12_0_0_A0(dc->ctx->asic_id.hw_internal_rev)) { + num_available_chans = num_available_chans > 16 ? 16 : num_available_chans; + } else if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev)) { + num_available_chans = num_available_chans > 8 ? 8 : num_available_chans; + } + + return num_available_chans; +} + static struct resource_funcs dcn401_res_pool_funcs = { .destroy = dcn401_destroy_resource_pool, .link_enc_create = dcn401_link_encoder_create, @@ -1714,6 +1753,7 @@ static struct resource_funcs dcn401_res_pool_funcs = { .prepare_mcache_programming = dcn401_prepare_mcache_programming, .build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params, .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, + .get_power_profile = dcn401_get_power_profile, }; static uint32_t read_pipe_fuses(struct dc_context *ctx) @@ -1795,14 +1835,12 @@ static bool dcn401_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; - /* total size = mall per channel * num channels * 1024 * 1024 */ - dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576; dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.cache_line_size = 64; dc->caps.cache_num_ways = 16; /* Calculate the available MALL space */ - dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall( + dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall( dc, dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel * 1024 * 1024; dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes; @@ -1826,6 +1864,7 @@ static bool dcn401_resource_construct( dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev)) dc->caps.dcc_plane_width_limit = 7680; @@ -1867,6 +1906,7 @@ static bool dcn401_resource_construct( dc->config.prefer_easf = true; dc->config.dc_mode_clk_limit_support = true; dc->config.enable_windowed_mpo_odm = true; + dc->config.set_pipe_unlock_order = true; /* Need to ensure DET gets freed before allocating */ /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { @@ -2132,6 +2172,7 @@ static bool dcn401_resource_construct( /* SPL */ spl_init_easf_filter_coeffs(); spl_init_blur_scale_coeffs(); + dc->caps.scl_caps.sharpener_support = true; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h index 514d1ce20df9..19568c359669 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h @@ -536,8 +536,9 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context); SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \ SRI_ARR(OPTC_WIDTH_CONTROL2, ODM, inst), \ SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \ - 
SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \ - SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst) + SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \ + SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst), \ + SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst) /* HUBBUB */ #define HUBBUB_REG_LIST_DCN4_01_RI(id) \ @@ -609,7 +610,10 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context); SR(DCHUBBUB_CLOCK_CNTL), \ SR(DCHUBBUB_SDPIF_CFG0), \ SR(DCHUBBUB_SDPIF_CFG1), \ - SR(DCHUBBUB_MEM_PWR_MODE_CTRL) + SR(DCHUBBUB_MEM_PWR_MODE_CTRL), \ + SR(DCHUBBUB_TIMEOUT_DETECTION_CTRL1), \ + SR(DCHUBBUB_TIMEOUT_DETECTION_CTRL2), \ + SR(DCHUBBUB_CTRL_STATUS) /* DCCG */ diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c index 014e8a296f0c..73a65913cb12 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c @@ -99,7 +99,7 @@ static struct spl_rect calculate_plane_rec_in_timing_active( * * recout_x = 128 + round(plane_x * 2304 / 1920) * recout_w = 128 + round((plane_x + plane_w) * 2304 / 1920) - recout_x - * recout_y = 0 + round(plane_y * 1440 / 1280) + * recout_y = 0 + round(plane_y * 1440 / 1200) * recout_h = 0 + round((plane_y + plane_h) * 1440 / 1200) - recout_y * * NOTE: fixed point division is not error free. To reduce errors @@ -739,14 +739,13 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in, return SCL_MODE_SCALING_444_RGB_ENABLE; } - /* Bypass YUV if at 1:1 with no ISHARP or if doing 2:1 YUV - * downscale without EASF + /* + * Bypass YUV if Y is 1:1 with no ISHARP + * Do not bypass UV at 1:1 for cositing to be applied */ - if ((!enable_isharp) && (!enable_easf)) { + if (!enable_isharp) { if (data->ratios.horz.value == one && data->ratios.vert.value == one) return SCL_MODE_SCALING_420_LUMA_BYPASS; - if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one) - return SCL_MODE_SCALING_420_CHROMA_BYPASS; } return SCL_MODE_SCALING_420_YCBCR_ENABLE; @@ -848,13 +847,13 @@ static bool spl_get_isharp_en(struct spl_in *spl_in, * surfaces based on policy setting */ if (!spl_is_yuv420(spl_in->basic_in.format) && - (spl_in->debug.sharpen_policy == SHARPEN_YUV)) + (spl_in->sharpen_policy == SHARPEN_YUV)) return enable_isharp; else if ((spl_is_yuv420(spl_in->basic_in.format) && !fullscreen) && - (spl_in->debug.sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV)) + (spl_in->sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV)) return enable_isharp; else if (!spl_in->is_fullscreen && - spl_in->debug.sharpen_policy == SHARPEN_FULLSCREEN_ALL) + spl_in->sharpen_policy == SHARPEN_FULLSCREEN_ALL) return enable_isharp; /* @@ -868,6 +867,60 @@ static bool spl_get_isharp_en(struct spl_in *spl_in, return enable_isharp; } +/* Calculate number of tap with adaptive scaling off */ +static void spl_get_taps_non_adaptive_scaler( + struct spl_scratch *spl_scratch, const struct spl_taps *in_taps) +{ + if (in_taps->h_taps == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1) + spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil( + spl_scratch->scl_data.ratios.horz), 8); + else + spl_scratch->scl_data.taps.h_taps = 4; + } else + spl_scratch->scl_data.taps.h_taps = in_taps->h_taps; + + if (in_taps->v_taps == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 1) + spl_scratch->scl_data.taps.v_taps = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int( + spl_scratch->scl_data.ratios.vert, 2)), 8); + else + spl_scratch->scl_data.taps.v_taps = 4; + } else + spl_scratch->scl_data.taps.v_taps = in_taps->v_taps; 
+ + if (in_taps->v_taps_c == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 1) + spl_scratch->scl_data.taps.v_taps_c = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int( + spl_scratch->scl_data.ratios.vert_c, 2)), 8); + else + spl_scratch->scl_data.taps.v_taps_c = 4; + } else + spl_scratch->scl_data.taps.v_taps_c = in_taps->v_taps_c; + + if (in_taps->h_taps_c == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz_c) > 1) + spl_scratch->scl_data.taps.h_taps_c = spl_min(2 * spl_fixpt_ceil( + spl_scratch->scl_data.ratios.horz_c), 8); + else + spl_scratch->scl_data.taps.h_taps_c = 4; + } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) + /* Only 1 and even h_taps_c are supported by hw */ + spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1; + else + spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c; + + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz)) + spl_scratch->scl_data.taps.h_taps = 1; + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert)) + spl_scratch->scl_data.taps.v_taps = 1; + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c)) + spl_scratch->scl_data.taps.h_taps_c = 1; + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)) + spl_scratch->scl_data.taps.v_taps_c = 1; + +} + /* Calculate optimal number of taps */ static bool spl_get_optimal_number_of_taps( int max_downscale_src_width, struct spl_in *spl_in, struct spl_scratch *spl_scratch, @@ -879,11 +932,26 @@ static bool spl_get_optimal_number_of_taps( int min_taps_y, min_taps_c; enum lb_memory_config lb_config; bool skip_easf = false; + bool is_ycbcr = spl_dscl_is_video_format(spl_in->basic_in.format); if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active && max_downscale_src_width != 0 && - spl_scratch->scl_data.viewport.width > max_downscale_src_width) + spl_scratch->scl_data.viewport.width > max_downscale_src_width) { + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps); + *enable_easf_v = false; + *enable_easf_h = false; + *enable_isharp = false; return false; + } + + /* Disable adaptive scaler and sharpener when integer scaling is enabled */ + if (spl_in->scaling_quality.integer_scaling) { + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps); + *enable_easf_v = false; + *enable_easf_h = false; + *enable_isharp = false; + return true; + } /* Check if we are using EASF or not */ skip_easf = enable_easf(spl_in, spl_scratch); @@ -893,43 +961,9 @@ static bool spl_get_optimal_number_of_taps( * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling * taps = 4 for upscaling */ - if (skip_easf) { - if (in_taps->h_taps == 0) { - if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1) - spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil( - spl_scratch->scl_data.ratios.horz), 8); - else - spl_scratch->scl_data.taps.h_taps = 4; - } else - spl_scratch->scl_data.taps.h_taps = in_taps->h_taps; - if (in_taps->v_taps == 0) { - if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 1) - spl_scratch->scl_data.taps.v_taps = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int( - spl_scratch->scl_data.ratios.vert, 2)), 8); - else - spl_scratch->scl_data.taps.v_taps = 4; - } else - spl_scratch->scl_data.taps.v_taps = in_taps->v_taps; - if (in_taps->v_taps_c == 0) { - if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 1) - spl_scratch->scl_data.taps.v_taps_c = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int( - spl_scratch->scl_data.ratios.vert_c, 2)), 8); - else - spl_scratch->scl_data.taps.v_taps_c = 4; - } else - 
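spl_get_taps_non_adaptive_scaler() above consolidates the tap rule that was previously open-coded in spl_get_optimal_number_of_taps() (the duplicated copy is deleted further down in this hunk): taps = min(2 * ceil(ratio), 8) when downscaling, 4 otherwise, with chroma horizontal taps restricted by hardware to 1 or an even count. A minimal sketch of that rule, approximating the driver's fixed31_32 ratio with a plain double:

#include <stdio.h>

/* Illustrative: the per-dimension tap rule from
 * spl_get_taps_non_adaptive_scaler(); ceil(ratio) > 1 means downscaling. */
static int demo_taps(double ratio)
{
	int c = (int)ratio + ((double)(int)ratio < ratio);	/* ceil() for ratio > 0 */

	if (c > 1) {
		int taps = 2 * c;
		return taps > 8 ? 8 : taps;	/* min(2 * ceil(ratio), 8) */
	}
	return 4;				/* upscaling or 1:1 default */
}

int main(void)
{
	printf("ratio 1.0 -> %d taps\n", demo_taps(1.0)); /* 4, later forced to 1 at identity */
	printf("ratio 2.5 -> %d taps\n", demo_taps(2.5)); /* 6 */
	printf("ratio 6.0 -> %d taps\n", demo_taps(6.0)); /* 8, clamped */
	return 0;
}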
spl_scratch->scl_data.taps.v_taps_c = in_taps->v_taps_c; - if (in_taps->h_taps_c == 0) { - if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz_c) > 1) - spl_scratch->scl_data.taps.h_taps_c = spl_min(2 * spl_fixpt_ceil( - spl_scratch->scl_data.ratios.horz_c), 8); - else - spl_scratch->scl_data.taps.h_taps_c = 4; - } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) - /* Only 1 and even h_taps_c are supported by hw */ - spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1; - else - spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c; - } else { + if (skip_easf) + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps); + else { if (spl_is_yuv420(spl_in->basic_in.format)) { spl_scratch->scl_data.taps.h_taps = 6; spl_scratch->scl_data.taps.v_taps = 6; @@ -954,7 +988,7 @@ static bool spl_get_optimal_number_of_taps( else lb_config = LB_MEMORY_CONFIG_0; // Determine max vtap support by calculating how much line buffer can fit - spl_in->funcs->spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_scratch->scl_data, + spl_in->callbacks.spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_scratch->scl_data, lb_config, &num_part_y, &num_part_c); /* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 2) @@ -1040,10 +1074,9 @@ static bool spl_get_optimal_number_of_taps( /* Sharpener requires scaler to be enabled, including for 1:1 * Check if ISHARP can be enabled - * If ISHARP is not enabled, for 1:1, set taps to 1 and disable - * EASF - * For case of 2:1 YUV where chroma is 1:1, set taps to 1 if - * EASF is not enabled + * If ISHARP is not enabled, set taps to 1 if ratio is 1:1 + * except for chroma taps. Keep previous taps so it can + * handle cositing */ *enable_isharp = spl_get_isharp_en(spl_in, spl_scratch); @@ -1053,20 +1086,28 @@ static bool spl_get_optimal_number_of_taps( spl_scratch->scl_data.taps.h_taps = 1; spl_scratch->scl_data.taps.v_taps = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c)) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_ycbcr) spl_scratch->scl_data.taps.h_taps_c = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_ycbcr) spl_scratch->scl_data.taps.v_taps_c = 1; *enable_easf_v = false; *enable_easf_h = false; } else { if ((!*enable_easf_h) && + (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz))) + spl_scratch->scl_data.taps.h_taps = 1; + + if ((!*enable_easf_v) && + (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))) + spl_scratch->scl_data.taps.v_taps = 1; + + if ((!*enable_easf_h) && !is_ycbcr && (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c))) spl_scratch->scl_data.taps.h_taps_c = 1; - if ((!*enable_easf_v) && + if ((!*enable_easf_v) && !is_ycbcr && (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))) spl_scratch->scl_data.taps.v_taps_c = 1; } @@ -1077,8 +1118,7 @@ static bool spl_get_optimal_number_of_taps( static void spl_set_black_color_data(enum spl_pixel_format format, struct scl_black_color *scl_black_color) { - bool ycbcr = format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN - && format <= SPL_PIXEL_FORMAT_VIDEO_END; + bool ycbcr = spl_dscl_is_video_format(format); if (ycbcr) { scl_black_color->offset_rgb_y = BLACK_OFFSET_RGB_Y; scl_black_color->offset_rgb_cbcr = BLACK_OFFSET_CBCR; @@ -1590,7 +1630,8 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data, spl_build_isharp_1dlut_from_reference_curve(ratio, setup, adp_sharpness, 
scale_to_sharpness_policy); - dscl_prog_data->isharp_delta = spl_get_pregen_filter_isharp_1D_lut(setup); + memcpy(dscl_prog_data->isharp_delta, spl_get_pregen_filter_isharp_1D_lut(setup), + sizeof(uint32_t) * ISHARP_LUT_TABLE_SIZE); dscl_prog_data->sharpness_level = adp_sharpness.sharpness_level; dscl_prog_data->isharp_en = 1; // ISHARP_EN @@ -1711,6 +1752,32 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data, spl_set_blur_scale_data(dscl_prog_data, data); } +/* Calculate recout, scaling ratio, and viewport, then get optimal number of taps */ +static bool spl_calculate_number_of_taps(struct spl_in *spl_in, struct spl_scratch *spl_scratch, struct spl_out *spl_out, + bool *enable_easf_v, bool *enable_easf_h, bool *enable_isharp) +{ + bool res = false; + + memset(spl_scratch, 0, sizeof(struct spl_scratch)); + spl_scratch->scl_data.h_active = spl_in->h_active; + spl_scratch->scl_data.v_active = spl_in->v_active; + + // All SPL calls + /* recout calculation */ + /* depends on h_active */ + spl_calculate_recout(spl_in, spl_scratch, spl_out); + /* depends on pixel format */ + spl_calculate_scaling_ratios(spl_in, spl_scratch, spl_out); + /* depends on scaling ratios and recout, does not calculate offset yet */ + spl_calculate_viewport_size(spl_in, spl_scratch); + + res = spl_get_optimal_number_of_taps( + spl_in->basic_out.max_downscale_src_width, spl_in, + spl_scratch, &spl_in->scaling_quality, enable_easf_v, + enable_easf_h, enable_isharp); + return res; +} + /* Calculate scaler parameters */ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out) { @@ -1725,23 +1792,9 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out) bool enable_isharp = false; const struct spl_scaler_data *data = &spl_scratch.scl_data; - memset(&spl_scratch, 0, sizeof(struct spl_scratch)); - spl_scratch.scl_data.h_active = spl_in->h_active; - spl_scratch.scl_data.v_active = spl_in->v_active; - - // All SPL calls - /* recout calculation */ - /* depends on h_active */ - spl_calculate_recout(spl_in, &spl_scratch, spl_out); - /* depends on pixel format */ - spl_calculate_scaling_ratios(spl_in, &spl_scratch, spl_out); - /* depends on scaling ratios and recout, does not calculate offset yet */ - spl_calculate_viewport_size(spl_in, &spl_scratch); + res = spl_calculate_number_of_taps(spl_in, &spl_scratch, spl_out, + &enable_easf_v, &enable_easf_h, &enable_isharp); - res = spl_get_optimal_number_of_taps( - spl_in->basic_out.max_downscale_src_width, spl_in, - &spl_scratch, &spl_in->scaling_quality, &enable_easf_v, - &enable_easf_h, &enable_isharp); /* * Depends on recout, scaling ratios, h_active and taps * May need to re-check lb size after this in some obscure scenario @@ -1753,12 +1806,12 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out) // Clamp spl_clamp_viewport(&spl_scratch.scl_data.viewport); - if (!res) - return res; - // Save all calculated parameters in dscl_prog_data structure to program hw registers spl_set_dscl_prog_data(spl_in, &spl_scratch, spl_out, enable_easf_v, enable_easf_h, enable_isharp); + if (!res) + return res; + if (spl_in->lls_pref == LLS_PREF_YES) { if (spl_in->is_hdr_on) setup = HDR_L; @@ -1789,3 +1842,20 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out) return res; } + +/* External interface to get number of taps only */ +bool spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out) +{ + bool res = false; + bool enable_easf_v = false; + bool 
enable_easf_h = false; + bool enable_isharp = false; + struct spl_scratch spl_scratch; + struct dscl_prog_data *dscl_prog_data = spl_out->dscl_prog_data; + const struct spl_scaler_data *data = &spl_scratch.scl_data; + + res = spl_calculate_number_of_taps(spl_in, &spl_scratch, spl_out, + &enable_easf_v, &enable_easf_h, &enable_isharp); + spl_set_taps_data(dscl_prog_data, data); + return res; +} diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h index 205e59a2a8ee..02a2d6725ed5 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h @@ -13,4 +13,6 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out); +bool spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out); + #endif /* __DC_SPL_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h index afcc66206ca2..89af91e19b6c 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h @@ -7,7 +7,6 @@ #include "dc_spl_types.h" -#define ISHARP_LUT_TABLE_SIZE 32 const uint32_t *spl_get_filter_isharp_1D_lut_0(void); const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void); const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void); diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h index 2a74ff5fdfdb..55d557df4aa5 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h @@ -5,10 +5,8 @@ #ifndef __DC_SPL_TYPES_H__ #define __DC_SPL_TYPES_H__ +#include "spl_debug.h" #include "spl_os_types.h" // swap -#ifndef SPL_ASSERT -#define SPL_ASSERT(_bool) ((void *)0) -#endif #include "spl_fixpt31_32.h" // fixed31_32 and related functions #include "spl_custom_float.h" // custom float and related functions @@ -252,6 +250,7 @@ enum isharp_en { ISHARP_DISABLE, ISHARP_ENABLE }; +#define ISHARP_LUT_TABLE_SIZE 32 // Below struct holds values that can be directly used to program // hardware registers. No conversion/clamping is required struct dscl_prog_data { @@ -402,7 +401,7 @@ struct dscl_prog_data { uint32_t isharp_nl_en; // ISHARP_NL_EN ? 
TODO:check this struct isharp_lba isharp_lba; // ISHARP_LBA struct isharp_fmt isharp_fmt; // ISHARP_FMT - const uint32_t *isharp_delta; + uint32_t isharp_delta[ISHARP_LUT_TABLE_SIZE]; struct isharp_nldelta_sclip isharp_nldelta_sclip; // ISHARP_NLDELTA_SCLIP /* blur and scale filter */ const uint16_t *filter_blur_scale_v; @@ -498,7 +497,7 @@ enum scale_to_sharpness_policy { SCALE_TO_SHARPNESS_ADJ_YUV = 1, SCALE_TO_SHARPNESS_ADJ_ALL = 2 }; -struct spl_funcs { +struct spl_callbacks { void (*spl_calc_lb_num_partitions) (bool alpha_en, const struct spl_scaler_data *scl_data, @@ -510,7 +509,6 @@ struct spl_funcs { struct spl_debug { int visual_confirm_base_offset; int visual_confirm_dpp_offset; - enum sharpen_policy sharpen_policy; enum scale_to_sharpness_policy scale_to_sharpness_policy; }; @@ -520,7 +518,7 @@ struct spl_in { // Basic slice information int odm_slice_index; // ODM Slice Index using get_odm_split_index struct spl_taps scaling_quality; // Explicit Scaling Quality - struct spl_funcs *funcs; + struct spl_callbacks callbacks; // Inputs for isharp and EASF struct adaptive_sharpness adaptive_sharpness; // Adaptive Sharpness enum linear_light_scaling lls_pref; // Linear Light Scaling @@ -532,6 +530,7 @@ struct spl_in { int h_active; int v_active; int sdr_white_level_nits; + enum sharpen_policy sharpen_policy; }; // end of SPL inputs diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h index 5696dafd0894..a6f6132df241 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h @@ -5,21 +5,26 @@ #ifndef SPL_DEBUG_H #define SPL_DEBUG_H -#ifdef SPL_ASSERT -#undef SPL_ASSERT -#endif -#define SPL_ASSERT(b) +#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB) +#define SPL_ASSERT_CRITICAL(expr) do { \ + if (WARN_ON(!(expr))) { \ + kgdb_breakpoint(); \ + } \ +} while (0) +#else +#define SPL_ASSERT_CRITICAL(expr) do { \ + if (WARN_ON(!(expr))) { \ + ; \ + } \ +} while (0) +#endif /* CONFIG_HAVE_KGDB || CONFIG_KGDB */ -#define SPL_ASSERT_CRITICAL(expr) do {if (expr)/* Do nothing */; } while (0) +#if defined(CONFIG_DEBUG_KERNEL_DC) +#define SPL_ASSERT(expr) SPL_ASSERT_CRITICAL(expr) +#else +#define SPL_ASSERT(expr) WARN_ON(!(expr)) +#endif /* CONFIG_DEBUG_KERNEL_DC */ -#ifdef SPL_DALMSG -#undef SPL_DALMSG -#endif -#define SPL_DALMSG(b) - -#ifdef SPL_DAL_ASSERT_MSG -#undef SPL_DAL_ASSERT_MSG -#endif -#define SPL_DAL_ASSERT_MSG(b, m) +#define SPL_BREAK_TO_DEBUGGER() SPL_ASSERT(0) #endif // SPL_DEBUG_H diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c index a95565df5487..131f1e3949d3 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c @@ -22,14 +22,14 @@ static inline unsigned long long abs_i64( * result = dividend / divisor * *remainder = dividend % divisor */ -static inline unsigned long long complete_integer_division_u64( +static inline unsigned long long spl_complete_integer_division_u64( unsigned long long dividend, unsigned long long divisor, unsigned long long *remainder) { unsigned long long result; - ASSERT(divisor); + SPL_ASSERT(divisor); result = spl_div64_u64_rem(dividend, divisor, remainder); @@ -60,10 +60,10 @@ struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long den /* determine integer part */ - unsigned long long res_value = complete_integer_division_u64( + unsigned long long res_value = 
spl_complete_integer_division_u64( arg1_value, arg2_value, &remainder); - ASSERT(res_value <= LONG_MAX); + SPL_ASSERT(res_value <= (unsigned long long)LONG_MAX); /* determine fractional part */ { @@ -85,7 +85,7 @@ struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long den { unsigned long long summand = (remainder << 1) >= arg2_value; - ASSERT(res_value <= LLONG_MAX - summand); + SPL_ASSERT(res_value <= (unsigned long long)LLONG_MAX - summand); res_value += summand; } @@ -118,19 +118,19 @@ struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed res.value = arg1_int * arg2_int; - ASSERT(res.value <= (long long)LONG_MAX); + SPL_ASSERT(res.value <= (long long)LONG_MAX); res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART; tmp = arg1_int * arg2_fra; - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; tmp = arg2_int * arg1_fra; - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; @@ -139,7 +139,7 @@ struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + (tmp >= (unsigned long long)spl_fixpt_half.value); - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; @@ -163,17 +163,17 @@ struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg) res.value = arg_int * arg_int; - ASSERT(res.value <= (long long)LONG_MAX); + SPL_ASSERT(res.value <= (long long)LONG_MAX); res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART; tmp = arg_int * arg_fra; - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; @@ -182,7 +182,7 @@ struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg) tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + (tmp >= (unsigned long long)spl_fixpt_half.value); - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; @@ -196,7 +196,7 @@ struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg) * Good idea to use Newton's method */ - ASSERT(arg.value); + SPL_ASSERT(arg.value); return spl_fixpt_from_fraction( spl_fixpt_one.value, @@ -286,7 +286,7 @@ struct spl_fixed31_32 spl_fixpt_cos(struct spl_fixed31_32 arg) * * Calculated as Taylor series. 
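The ASSERT-to-SPL_ASSERT conversions in spl_fixpt_mul() and spl_fixpt_sqr() above keep the same overflow guards but route them through SPL's own macro, which after this patch is WARN_ON-based rather than a no-op. The guarded multiply splits each Q31.32 operand into integer and fractional halves so every partial product fits in 64 bits; a userspace sketch of the same decomposition, with assert() standing in for SPL_ASSERT:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 32	/* FIXED31_32_BITS_PER_FRACTIONAL_PART */

/* Illustrative Q31.32 multiply following the int/frac decomposition of
 * spl_fixpt_mul(); assert() models the SPL_ASSERT overflow guards. */
static int64_t q3132_mul(int64_t a, int64_t b)
{
	uint64_t a_abs = (uint64_t)(a >= 0 ? a : -a);
	uint64_t b_abs = (uint64_t)(b >= 0 ? b : -b);
	uint64_t a_int = a_abs >> FRAC_BITS, a_fra = a_abs & 0xFFFFFFFFull;
	uint64_t b_int = b_abs >> FRAC_BITS, b_fra = b_abs & 0xFFFFFFFFull;
	uint64_t res, tmp;

	res = a_int * b_int;
	assert(res <= INT32_MAX);		/* integer part must still fit */
	res <<= FRAC_BITS;

	tmp = a_int * b_fra;			/* cross terms, already Q.32 */
	assert(tmp <= (uint64_t)INT64_MAX - res);
	res += tmp;

	tmp = b_int * a_fra;
	assert(tmp <= (uint64_t)INT64_MAX - res);
	res += tmp;

	tmp = a_fra * b_fra;			/* frac*frac: shift back, round */
	tmp = (tmp >> FRAC_BITS) + (tmp >= 0x80000000ull);
	assert(tmp <= (uint64_t)INT64_MAX - res);
	res += tmp;

	return ((a < 0) != (b < 0)) ? -(int64_t)res : (int64_t)res;
}

int main(void)
{
	int64_t half = 1ll << 31;	/* 0.5 in Q31.32 */
	int64_t three = 3ll << 32;	/* 3.0 */

	/* 3.0 * 0.5 = 1.5 -> 0x180000000 in Q31.32 */
	printf("0x%llx\n", (unsigned long long)q3132_mul(three, half));
	return 0;
}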
*/ -static struct spl_fixed31_32 fixed31_32_exp_from_taylor_series(struct spl_fixed31_32 arg) +static struct spl_fixed31_32 spl_fixed31_32_exp_from_taylor_series(struct spl_fixed31_32 arg) { unsigned int n = 9; @@ -295,7 +295,7 @@ static struct spl_fixed31_32 fixed31_32_exp_from_taylor_series(struct spl_fixed3 n + 1); /* TODO find correct res */ - ASSERT(spl_fixpt_lt(arg, spl_fixpt_one)); + SPL_ASSERT(spl_fixpt_lt(arg, spl_fixpt_one)); do res = spl_fixpt_add( @@ -337,22 +337,22 @@ struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg) spl_fixpt_ln2, m)); - ASSERT(m != 0); + SPL_ASSERT(m != 0); - ASSERT(spl_fixpt_lt( + SPL_ASSERT(spl_fixpt_lt( spl_fixpt_abs(r), spl_fixpt_one)); if (m > 0) return spl_fixpt_shl( - fixed31_32_exp_from_taylor_series(r), + spl_fixed31_32_exp_from_taylor_series(r), (unsigned char)m); else return spl_fixpt_div_int( - fixed31_32_exp_from_taylor_series(r), + spl_fixed31_32_exp_from_taylor_series(r), 1LL << -m); } else if (arg.value != 0) - return fixed31_32_exp_from_taylor_series(arg); + return spl_fixed31_32_exp_from_taylor_series(arg); else return spl_fixpt_one; } @@ -364,7 +364,7 @@ struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg) struct spl_fixed31_32 error; - ASSERT(arg.value > 0); + SPL_ASSERT(arg.value > 0); /* TODO if arg is negative, return NaN */ /* TODO if arg is zero, return -INF */ @@ -396,7 +396,7 @@ struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg) * part in 32 bits. It is used in hw programming (scaler) */ -static inline unsigned int ux_dy( +static inline unsigned int spl_ux_dy( long long value, unsigned int integer_bits, unsigned int fractional_bits) @@ -415,13 +415,13 @@ static inline unsigned int ux_dy( return result | fractional_part; } -static inline unsigned int clamp_ux_dy( +static inline unsigned int spl_clamp_ux_dy( long long value, unsigned int integer_bits, unsigned int fractional_bits, unsigned int min_clamp) { - unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits); + unsigned int truncated_val = spl_ux_dy(value, integer_bits, fractional_bits); if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART))) return (1 << (integer_bits + fractional_bits)) - 1; @@ -433,40 +433,40 @@ static inline unsigned int clamp_ux_dy( unsigned int spl_fixpt_u4d19(struct spl_fixed31_32 arg) { - return ux_dy(arg.value, 4, 19); + return spl_ux_dy(arg.value, 4, 19); } unsigned int spl_fixpt_u3d19(struct spl_fixed31_32 arg) { - return ux_dy(arg.value, 3, 19); + return spl_ux_dy(arg.value, 3, 19); } unsigned int spl_fixpt_u2d19(struct spl_fixed31_32 arg) { - return ux_dy(arg.value, 2, 19); + return spl_ux_dy(arg.value, 2, 19); } unsigned int spl_fixpt_u0d19(struct spl_fixed31_32 arg) { - return ux_dy(arg.value, 0, 19); + return spl_ux_dy(arg.value, 0, 19); } unsigned int spl_fixpt_clamp_u0d14(struct spl_fixed31_32 arg) { - return clamp_ux_dy(arg.value, 0, 14, 1); + return spl_clamp_ux_dy(arg.value, 0, 14, 1); } unsigned int spl_fixpt_clamp_u0d10(struct spl_fixed31_32 arg) { - return clamp_ux_dy(arg.value, 0, 10, 1); + return spl_clamp_ux_dy(arg.value, 0, 10, 1); } int spl_fixpt_s4d19(struct spl_fixed31_32 arg) { if (arg.value < 0) - return -(int)ux_dy(spl_fixpt_abs(arg).value, 4, 19); + return -(int)spl_ux_dy(spl_fixpt_abs(arg).value, 4, 19); else - return ux_dy(arg.value, 4, 19); + return spl_ux_dy(arg.value, 4, 19); } struct spl_fixed31_32 spl_fixpt_from_ux_dy(unsigned int value, diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h 
b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h index 8a045e2f8699..ed2647f9a099 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h @@ -5,11 +5,8 @@ #ifndef __SPL_FIXED31_32_H__ #define __SPL_FIXED31_32_H__ -#include "os_types.h" +#include "spl_debug.h" #include "spl_os_types.h" // swap -#ifndef ASSERT -#define ASSERT(_bool) ((void *)0) -#endif #ifndef LLONG_MAX #define LLONG_MAX 9223372036854775807ll @@ -194,7 +191,7 @@ static inline struct spl_fixed31_32 spl_fixpt_clamp( */ static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned char shift) { - ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) || + SPL_ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) || ((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift)))); arg.value = arg.value << shift; @@ -231,7 +228,7 @@ static inline struct spl_fixed31_32 spl_fixpt_add(struct spl_fixed31_32 arg1, st { struct spl_fixed31_32 res; - ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) || + SPL_ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) || ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value))); res.value = arg1.value + arg2.value; @@ -256,7 +253,7 @@ static inline struct spl_fixed31_32 spl_fixpt_sub(struct spl_fixed31_32 arg1, st { struct spl_fixed31_32 res; - ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) || + SPL_ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) || ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value))); res.value = arg1.value - arg2.value; @@ -448,7 +445,7 @@ static inline int spl_fixpt_round(struct spl_fixed31_32 arg) const long long summand = spl_fixpt_half.value; - ASSERT(LLONG_MAX - (long long)arg_value >= summand); + SPL_ASSERT(LLONG_MAX - (long long)arg_value >= summand); arg_value += summand; @@ -469,7 +466,7 @@ static inline int spl_fixpt_ceil(struct spl_fixed31_32 arg) const long long summand = spl_fixpt_one.value - spl_fixpt_epsilon.value; - ASSERT(LLONG_MAX - (long long)arg_value >= summand); + SPL_ASSERT(LLONG_MAX - (long long)arg_value >= summand); arg_value += summand; @@ -504,7 +501,7 @@ static inline struct spl_fixed31_32 spl_fixpt_truncate(struct spl_fixed31_32 arg bool negative = arg.value < 0; if (frac_bits >= FIXED31_32_BITS_PER_FRACTIONAL_PART) { - ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART); + SPL_ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART); return arg; } diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h index 709706ed4f2c..2e6ba71960ac 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h @@ -6,6 +6,8 @@ #ifndef _SPL_OS_TYPES_H_ #define _SPL_OS_TYPES_H_ +#include "spl_debug.h" + #include <linux/slab.h> #include <linux/kgdb.h> #include <linux/kref.h> @@ -18,7 +20,6 @@ * general debug capabilities * */ -#define SPL_BREAK_TO_DEBUGGER() ASSERT(0) static inline uint64_t spl_div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder) { diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index fe5b6f7a3eb1..b353c4ceb60d 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -301,6 +301,7 @@ struct dmub_srv_hw_params { bool disallow_phy_access; bool disable_sldo_opt; bool enable_non_transparent_setconfig; + bool 
lower_hbr3_phy_ssc; }; /** @@ -570,6 +571,14 @@ struct dmub_notification { }; }; +/* enum dmub_ips_mode - IPS mode identifier */ +enum dmub_ips_mode { + DMUB_IPS_MODE_IPS1_MAX = 0, + DMUB_IPS_MODE_IPS2, + DMUB_IPS_MODE_IPS1_RCG, + DMUB_IPS_MODE_IPS1_ONO2_ON +}; + /** * DMUB firmware version helper macro - useful for checking if the version * of a firmware to know if feature or functionality is supported or present. diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index ebcf68bfae2b..b800a507d1e0 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -170,6 +170,11 @@ #pragma pack(push, 1) #define ABM_NUM_OF_ACE_SEGMENTS 5 +/** + * Debug FW state offset + */ +#define DMUB_DEBUG_FW_STATE_OFFSET 0x300 + union abm_flags { struct { /** @@ -490,6 +495,7 @@ struct dmub_feature_caps { uint8_t gecc_enable; uint8_t replay_supported; uint8_t replay_reserved[3]; + uint8_t abm_aux_backlight_support; }; struct dmub_visual_confirm_color { @@ -689,7 +695,8 @@ union dmub_fw_boot_options { uint32_t ips_disable: 3; /* options to disable ips support*/ uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */ uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */ - uint32_t reserved : 7; /**< reserved */ + uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */ + uint32_t reserved : 6; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ }; @@ -721,6 +728,7 @@ enum dmub_shared_state_feature_id { DMUB_SHARED_SHARE_FEATURE__INVALID = 0, DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1, DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2, + DMUB_SHARED_SHARE_FEATURE__DEBUG_SETUP = 3, DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */ }; @@ -747,7 +755,8 @@ union dmub_shared_state_ips_driver_signals { uint32_t allow_ips1 : 1; /**< 1 is IPS1 is allowed */ uint32_t allow_ips2 : 1; /**< 1 is IPS1 is allowed */ uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */ - uint32_t reserved_bits : 28; /**< Reversed bits */ + uint32_t allow_idle: 1; /**< 1 if driver is allowing idle */ + uint32_t reserved_bits : 27; /**< Reversed bits */ } bits; uint32_t all; }; @@ -757,6 +766,14 @@ union dmub_shared_state_ips_driver_signals { */ #define DMUB_SHARED_STATE__IPS_FW_VERSION 1 +struct dmub_shared_state_debug_setup { + union { + struct { + uint32_t exclude_points[62]; + } profile_mode; + }; +}; + /** * struct dmub_shared_state_ips_fw - Firmware state for IPS. */ @@ -809,6 +826,7 @@ struct dmub_shared_state_feature_block { struct dmub_shared_state_feature_common common; /**< Generic data */ struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */ struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */ + struct dmub_shared_state_debug_setup debug_setup; /**< Debug setup */ } data; /**< Shared state data. 
*/ }; /* 256-bytes, fixed */ @@ -1051,11 +1069,110 @@ enum dmub_gpint_command { DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD3 = 119, /** + * DESC: Set IPS residency measurement + * ARGS: 0 - Disable ips measurement + * 1 - Enable ips measurement + */ + DMUB_GPINT__IPS_RESIDENCY = 121, + + /** * DESC: Enable measurements for various task duration * ARGS: 0 - Disable measurement * 1 - Enable measurement */ DMUB_GPINT__TRACE_DMUB_WAKE_ACTIVITY = 123, + + /** + * DESC: Gets IPS residency in microseconds + * ARGS: 0 - Return IPS1 residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total residency in microseconds - lower 32 bits + */ + DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO = 124, + + /** + * DESC: Gets IPS1 histogram counts + * ARGS: Bucket index + * RETURN: Total count for the bucket + */ + DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER = 125, + + /** + * DESC: Gets IPS2 histogram counts + * ARGS: Bucket index + * RETURN: Total count for the bucket + */ + DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER = 126, + + /** + * DESC: Gets IPS residency + * ARGS: 0 - Return IPS1 residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total residency in milli-percent. + */ + DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT = 127, + + /** + * DESC: Gets IPS1_RCG histogram counts + * ARGS: Bucket index + * RETURN: Total count for the bucket + */ + DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER = 128, + + /** + * DESC: Gets IPS1_ONO2_ON histogram counts + * ARGS: Bucket index + * RETURN: Total count for the bucket + */ + DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER = 129, + + /** + * DESC: Gets IPS entry counter during residency measurement + * ARGS: 0 - Return IPS1 entry counts + * 1 - Return IPS2 entry counts + * 2 - Return IPS1_RCG entry counts + * 3 - Return IPS2_ONO2_ON entry counts + * RETURN: Entry counter for selected IPS mode + */ + DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER = 130, + + /** + * DESC: Gets IPS inactive residency in microseconds + * ARGS: 0 - Return IPS1_MAX residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total inactive residency in microseconds - lower 32 bits + */ + DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO = 131, + + /** + * DESC: Gets IPS inactive residency in microseconds + * ARGS: 0 - Return IPS1_MAX residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total inactive residency in microseconds - upper 32 bits + */ + DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI = 132, + + /** + * DESC: Gets IPS residency in microseconds + * ARGS: 0 - Return IPS1 residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total residency in microseconds - upper 32 bits + */ + DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI = 133, + /** + * DESC: Setup debug configs. + */ + DMUB_GPINT__SETUP_DEBUG_MODE = 136, }; /** @@ -1306,9 +1423,10 @@ enum dmub_out_cmd_type { /* DMUB_CMD__DPIA command sub-types. */ enum dmub_cmd_dpia_type { DMUB_CMD__DPIA_DIG1_DPIA_CONTROL = 0, - DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1, + DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1, // will be replaced by DPIA_SET_CONFIG_REQUEST DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2, DMUB_CMD__DPIA_SET_TPS_NOTIFICATION = 3, + DMUB_CMD__DPIA_SET_CONFIG_REQUEST = 4, }; /* DMUB_OUT_CMD__DPIA_NOTIFICATION command types. 
*/ @@ -2097,7 +2215,7 @@ struct dmub_rb_cmd_dig1_dpia_control { }; /** - * SET_CONFIG Command Payload + * SET_CONFIG Command Payload (deprecated) */ struct set_config_cmd_payload { uint8_t msg_type; /* set config message type */ @@ -2105,7 +2223,7 @@ struct set_config_cmd_payload { }; /** - * Data passed from driver to FW in a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. + * Data passed from driver to FW in a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. (deprecated) */ struct dmub_cmd_set_config_control_data { struct set_config_cmd_payload cmd_pkt; @@ -2114,6 +2232,17 @@ struct dmub_cmd_set_config_control_data { }; /** + * SET_CONFIG Request Command Payload + */ +struct set_config_request_cmd_payload { + uint8_t instance; /* DPIA instance */ + uint8_t immed_status; /* Immediate status returned in case of error */ + uint8_t msg_type; /* set config message type */ + uint8_t reserved; + uint32_t msg_data; /* set config message data */ +}; + +/** * DMUB command structure for SET_CONFIG command. */ struct dmub_rb_cmd_set_config_access { @@ -2122,6 +2251,14 @@ struct dmub_rb_cmd_set_config_access { }; /** + * DMUB command structure for SET_CONFIG request command. + */ +struct dmub_rb_cmd_set_config_request { + struct dmub_cmd_header header; /* header */ + struct set_config_request_cmd_payload payload; /* set config request payload */ +}; + +/** * Data passed from driver to FW in a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command. */ struct dmub_cmd_mst_alloc_slots_control_data { @@ -4290,6 +4427,24 @@ struct dmub_rb_cmd_abm_set_pipe { }; /** + * Type of backlight control method to be used by ABM module + */ +enum dmub_backlight_control_type { + /** + * PWM Backlight control + */ + DMU_BACKLIGHT_CONTROL_PWM = 0, + /** + * VESA Aux-based backlight control + */ + DMU_BACKLIGHT_CONTROL_VESA_AUX = 1, + /** + * AMD DPCD Aux-based backlight control + */ + DMU_BACKLIGHT_CONTROL_AMD_AUX = 2, +}; + +/** * Data passed from driver to FW in a DMUB_CMD__ABM_SET_BACKLIGHT command. */ struct dmub_cmd_abm_set_backlight_data { @@ -4316,9 +4471,42 @@ struct dmub_cmd_abm_set_backlight_data { uint8_t panel_mask; /** + * AUX HW Instance. + */ + uint8_t aux_inst; + + /** * Explicit padding to 4 byte boundary. */ - uint8_t pad[2]; + uint8_t pad[1]; + + /** + * Backlight control type. + * Value 0 is PWM backlight control. + * Value 1 is VAUX backlight control. + * Value 2 is AMD DPCD AUX backlight control. + */ + enum dmub_backlight_control_type backlight_control_type; + + /** + * Minimum luminance in nits. + */ + uint32_t min_luminance; + + /** + * Maximum luminance in nits. + */ + uint32_t max_luminance; + + /** + * Minimum backlight in pwm. + */ + uint32_t min_backlight_pwm; + + /** + * Maximum backlight in pwm. 
+ */ + uint32_t max_backlight_pwm; }; /** @@ -5022,7 +5210,34 @@ struct dmub_rb_cmd_get_usbc_cable_id { enum dmub_cmd_secure_display_type { DMUB_CMD__SECURE_DISPLAY_TEST_CMD = 0, /* test command to only check if inbox message works */ DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE, - DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY + DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY, + DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_STOP_UPDATE, + DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_WIN_NOTIFY +}; + +#define MAX_ROI_NUM 2 + +struct dmub_cmd_roi_info { + uint16_t x_start; + uint16_t x_end; + uint16_t y_start; + uint16_t y_end; + uint8_t otg_id; + uint8_t phy_id; +}; + +struct dmub_cmd_roi_window_ctl { + uint16_t x_start; + uint16_t x_end; + uint16_t y_start; + uint16_t y_end; + bool enable; +}; + +struct dmub_cmd_roi_ctl_info { + uint8_t otg_id; + uint8_t phy_id; + struct dmub_cmd_roi_window_ctl roi_ctl[MAX_ROI_NUM]; }; /** @@ -5033,14 +5248,8 @@ struct dmub_rb_cmd_secure_display { /** * Data passed from driver to dmub firmware. */ - struct dmub_cmd_roi_info { - uint16_t x_start; - uint16_t x_end; - uint16_t y_start; - uint16_t y_end; - uint8_t otg_id; - uint8_t phy_id; - } roi_info; + struct dmub_cmd_roi_info roi_info; + struct dmub_cmd_roi_ctl_info mul_roi_ctl; }; /** @@ -5318,7 +5527,11 @@ union dmub_rb_cmd { /** * Definition of a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. */ - struct dmub_rb_cmd_set_config_access set_config_access; + struct dmub_rb_cmd_set_config_access set_config_access; // (deprecated) + /** + * Definition of a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. + */ + struct dmub_rb_cmd_set_config_request set_config_request; /** * Definition of a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command. */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 2ccad79053c5..e5e77bd3c31e 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -426,6 +426,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.ips_sequential_ono = params->ips_sequential_ono; boot_options.bits.disable_sldo_opt = params->disable_sldo_opt; boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig; + boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } @@ -463,7 +464,7 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub) void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data) { - uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; + uint32_t is_dmub_enabled, is_soft_reset; uint32_t is_traceport_enabled, is_cw6_enabled; if (!dmub || !diag_data) @@ -513,9 +514,6 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); diag_data->is_dmcub_soft_reset = is_soft_reset; - REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); - diag_data->is_dmcub_secure_reset = is_sec_reset; - REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); diag_data->is_traceport_en = is_traceport_enabled; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index db16066bc893..a3f3ff5d49ac 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -497,6 +497,7 @@ enum dmub_status const struct dmub_fw_meta_info *fw_info; uint32_t 
fw_state_size = DMUB_FW_STATE_SIZE; uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE; + uint32_t shared_state_size = DMUB_FW_HEADER_SHARED_STATE_SIZE; uint32_t window_sizes[DMUB_WINDOW_TOTAL] = { 0 }; if (!dmub->sw_init) @@ -514,6 +515,7 @@ enum dmub_status fw_state_size = fw_info->fw_region_size; trace_buffer_size = fw_info->trace_buffer_size; + shared_state_size = fw_info->shared_state_size; /** * If DM didn't fill in a version, then fill it in based on @@ -534,7 +536,7 @@ enum dmub_status window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size; window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size; window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE; - window_sizes[DMUB_WINDOW_SHARED_STATE] = DMUB_FW_HEADER_SHARED_STATE_SIZE; + window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size); out->fb_size = dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB); diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index aee5170f5fb2..de8f3cfed6c8 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -164,18 +164,19 @@ enum dpcd_psr_sink_states { PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7, }; -#define DP_SOURCE_SEQUENCE 0x30c -#define DP_SOURCE_TABLE_REVISION 0x310 -#define DP_SOURCE_PAYLOAD_SIZE 0x311 -#define DP_SOURCE_SINK_CAP 0x317 -#define DP_SOURCE_BACKLIGHT_LEVEL 0x320 -#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326 -#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E -#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F -#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340 +#define DP_SOURCE_SEQUENCE 0x30C +#define DP_SOURCE_TABLE_REVISION 0x310 +#define DP_SOURCE_PAYLOAD_SIZE 0x311 +#define DP_SOURCE_SINK_CAP 0x317 +#define DP_SOURCE_BACKLIGHT_LEVEL 0x320 +#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326 +#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E +#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F +#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340 #define DP_SINK_PR_REPLAY_STATUS 0x378 #define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379 #define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A +#define DP_SINK_EMISSION_RATE 0x37E /* Remove once drm_dp_helper.h is updated upstream */ #ifndef DP_TOTAL_LTTPR_CNT diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h index 02c23b04d34b..058f882d5bdd 100644 --- a/drivers/gpu/drm/amd/display/include/logger_interface.h +++ b/drivers/gpu/drm/amd/display/include/logger_interface.h @@ -52,10 +52,6 @@ void update_surface_trace( void post_surface_trace(struct dc *dc); -void context_timing_trace( - struct dc *dc, - struct resource_context *res_ctx); - void context_clock_trace( struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index a48d564d1660..4d68c1c6e210 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -61,11 +61,13 @@ #define DC_LOG_ALL_TF_CHANNELS(...) pr_debug("[GAMMA]:"__VA_ARGS__) #define DC_LOG_DSC(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__) -#define DC_LOG_MALL(...) pr_debug("[MALL]:"__VA_ARGS__) #define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_AUTO_DPM_TEST(...) 
pr_debug("[AutoDPMTest]: "__VA_ARGS__) #define DC_LOG_IPS(...) pr_debug("[IPS]: "__VA_ARGS__) +#define DC_LOG_MALL(...) pr_debug("[MALL]:"__VA_ARGS__) +#define DC_LOG_REGISTER_READ(...) pr_debug("[REGISTER_READ]: "__VA_ARGS__) +#define DC_LOG_REGISTER_WRITE(...) pr_debug("[REGISTER_WRITE]: "__VA_ARGS__) struct dc_log_buffer_ctx { char *buf; diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 3699e633801d..a71df052cf25 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1399,71 +1399,6 @@ static void scale_gamma_dx(struct pwl_float_data *pwl_rgb, pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b); } -/* todo: all these scale_gamma functions are inherently the same but - * take different structures as params or different format for ramp - * values. We could probably implement it in a more generic fashion - */ -static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb, - const struct regamma_ramp *ramp, - struct dividers dividers) -{ - unsigned short max_driver = 0xFFFF; - unsigned short max_os = 0xFF00; - unsigned short scaler = max_os; - uint32_t i; - struct pwl_float_data *rgb = pwl_rgb; - struct pwl_float_data *rgb_last = rgb + GAMMA_RGB_256_ENTRIES - 1; - - i = 0; - do { - if (ramp->gamma[i] > max_os || - ramp->gamma[i + 256] > max_os || - ramp->gamma[i + 512] > max_os) { - scaler = max_driver; - break; - } - i++; - } while (i != GAMMA_RGB_256_ENTRIES); - - i = 0; - do { - rgb->r = dc_fixpt_from_fraction( - ramp->gamma[i], scaler); - rgb->g = dc_fixpt_from_fraction( - ramp->gamma[i + 256], scaler); - rgb->b = dc_fixpt_from_fraction( - ramp->gamma[i + 512], scaler); - - ++rgb; - ++i; - } while (i != GAMMA_RGB_256_ENTRIES); - - rgb->r = dc_fixpt_mul(rgb_last->r, - dividers.divider1); - rgb->g = dc_fixpt_mul(rgb_last->g, - dividers.divider1); - rgb->b = dc_fixpt_mul(rgb_last->b, - dividers.divider1); - - ++rgb; - - rgb->r = dc_fixpt_mul(rgb_last->r, - dividers.divider2); - rgb->g = dc_fixpt_mul(rgb_last->g, - dividers.divider2); - rgb->b = dc_fixpt_mul(rgb_last->b, - dividers.divider2); - - ++rgb; - - rgb->r = dc_fixpt_mul(rgb_last->r, - dividers.divider3); - rgb->g = dc_fixpt_mul(rgb_last->g, - dividers.divider3); - rgb->b = dc_fixpt_mul(rgb_last->b, - dividers.divider3); -} - /* * RS3+ color transform DDI - 1D LUT adjustment is composed with regamma here * Input is evenly distributed in the output color space as specified in @@ -1663,106 +1598,6 @@ static bool calculate_interpolated_hardware_curve( return true; } -/* The "old" interpolation uses a complicated scheme to build an array of - * coefficients while also using an array of 0-255 normalized to 0-1 - * Then there's another loop using both of the above + new scaled user ramp - * and we concatenate them. It also searches for points of interpolation and - * uses enums for positions. 
- * - * This function uses a different approach: - * user ramp is always applied on X with 0/255, 1/255, 2/255, ..., 255/255 - * To find index for hwX , we notice the following: - * i/255 <= hwX < (i+1)/255 <=> i <= 255*hwX < i+1 - * See apply_lut_1d which is the same principle, but on 4K entry 1D LUT - * - * Once the index is known, combined Y is simply: - * user_ramp(index) + (hwX-index/255)*(user_ramp(index+1) - user_ramp(index) - * - * We should switch to this method in all cases, it's simpler and faster - * ToDo one day - for now this only applies to ADL regamma to avoid regression - * for regular use cases (sRGB and PQ) - */ -static void interpolate_user_regamma(uint32_t hw_points_num, - struct pwl_float_data *rgb_user, - bool apply_degamma, - struct dc_transfer_func_distributed_points *tf_pts) -{ - uint32_t i; - uint32_t color = 0; - int32_t index; - int32_t index_next; - struct fixed31_32 *tf_point; - struct fixed31_32 hw_x; - struct fixed31_32 norm_factor = - dc_fixpt_from_int(255); - struct fixed31_32 norm_x; - struct fixed31_32 index_f; - struct fixed31_32 lut1; - struct fixed31_32 lut2; - struct fixed31_32 delta_lut; - struct fixed31_32 delta_index; - const struct fixed31_32 one = dc_fixpt_from_int(1); - - i = 0; - /* fixed_pt library has problems handling too small values */ - while (i != 32) { - tf_pts->red[i] = dc_fixpt_zero; - tf_pts->green[i] = dc_fixpt_zero; - tf_pts->blue[i] = dc_fixpt_zero; - ++i; - } - while (i <= hw_points_num + 1) { - for (color = 0; color < 3; color++) { - if (color == 0) - tf_point = &tf_pts->red[i]; - else if (color == 1) - tf_point = &tf_pts->green[i]; - else - tf_point = &tf_pts->blue[i]; - - if (apply_degamma) { - if (color == 0) - hw_x = coordinates_x[i].regamma_y_red; - else if (color == 1) - hw_x = coordinates_x[i].regamma_y_green; - else - hw_x = coordinates_x[i].regamma_y_blue; - } else - hw_x = coordinates_x[i].x; - - if (dc_fixpt_le(one, hw_x)) - hw_x = one; - - norm_x = dc_fixpt_mul(norm_factor, hw_x); - index = dc_fixpt_floor(norm_x); - if (index < 0 || index > 255) - continue; - - index_f = dc_fixpt_from_int(index); - index_next = (index == 255) ? 
index : index + 1; - - if (color == 0) { - lut1 = rgb_user[index].r; - lut2 = rgb_user[index_next].r; - } else if (color == 1) { - lut1 = rgb_user[index].g; - lut2 = rgb_user[index_next].g; - } else { - lut1 = rgb_user[index].b; - lut2 = rgb_user[index_next].b; - } - - // we have everything now, so interpolate - delta_lut = dc_fixpt_sub(lut2, lut1); - delta_index = dc_fixpt_sub(norm_x, index_f); - - *tf_point = dc_fixpt_add(lut1, - dc_fixpt_mul(delta_index, delta_lut)); - } - ++i; - } -} - static void build_new_custom_resulted_curve( uint32_t hw_points_num, struct dc_transfer_func_distributed_points *tf_pts) @@ -1784,29 +1619,6 @@ static void build_new_custom_resulted_curve( } } -static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma, - uint32_t hw_points_num, struct calculate_buffer *cal_buffer) -{ - uint32_t i; - - struct gamma_coefficients coeff; - struct pwl_float_data_ex *rgb = rgb_regamma; - const struct hw_x_point *coord_x = coordinates_x; - - build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB); - - i = 0; - while (i != hw_points_num + 1) { - rgb->r = translate_from_linear_space_ex( - coord_x->x, &coeff, 0, cal_buffer); - rgb->g = rgb->r; - rgb->b = rgb->r; - ++coord_x; - ++rgb; - ++i; - } -} - static bool map_regamma_hw_to_x_user( const struct dc_gamma *ramp, struct pixel_gamma_point *coeff128, @@ -1855,125 +1667,6 @@ static bool map_regamma_hw_to_x_user( #define _EXTRA_POINTS 3 -bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, - const struct regamma_lut *regamma, - struct calculate_buffer *cal_buffer, - const struct dc_gamma *ramp) -{ - struct gamma_coefficients coeff; - const struct hw_x_point *coord_x = coordinates_x; - uint32_t i = 0; - - do { - coeff.a0[i] = dc_fixpt_from_fraction( - regamma->coeff.A0[i], 10000000); - coeff.a1[i] = dc_fixpt_from_fraction( - regamma->coeff.A1[i], 1000); - coeff.a2[i] = dc_fixpt_from_fraction( - regamma->coeff.A2[i], 1000); - coeff.a3[i] = dc_fixpt_from_fraction( - regamma->coeff.A3[i], 1000); - coeff.user_gamma[i] = dc_fixpt_from_fraction( - regamma->coeff.gamma[i], 1000); - - ++i; - } while (i != 3); - - i = 0; - /* fixed_pt library has problems handling too small values */ - while (i != 32) { - output_tf->tf_pts.red[i] = dc_fixpt_zero; - output_tf->tf_pts.green[i] = dc_fixpt_zero; - output_tf->tf_pts.blue[i] = dc_fixpt_zero; - ++coord_x; - ++i; - } - while (i != MAX_HW_POINTS + 1) { - output_tf->tf_pts.red[i] = translate_from_linear_space_ex( - coord_x->x, &coeff, 0, cal_buffer); - output_tf->tf_pts.green[i] = translate_from_linear_space_ex( - coord_x->x, &coeff, 1, cal_buffer); - output_tf->tf_pts.blue[i] = translate_from_linear_space_ex( - coord_x->x, &coeff, 2, cal_buffer); - ++coord_x; - ++i; - } - - if (ramp && ramp->type == GAMMA_CS_TFM_1D) - apply_lut_1d(ramp, MAX_HW_POINTS, &output_tf->tf_pts); - - // this function just clamps output to 0-1 - build_new_custom_resulted_curve(MAX_HW_POINTS, &output_tf->tf_pts); - output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - - return true; -} - -bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf, - const struct regamma_lut *regamma, - struct calculate_buffer *cal_buffer, - const struct dc_gamma *ramp) -{ - struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; - struct dividers dividers; - - struct pwl_float_data *rgb_user = NULL; - struct pwl_float_data_ex *rgb_regamma = NULL; - bool ret = false; - - if (regamma == NULL) - return false; - - output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - - rgb_user = 
kcalloc(GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS, - sizeof(*rgb_user), - GFP_KERNEL); - if (!rgb_user) - goto rgb_user_alloc_fail; - - rgb_regamma = kcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; - - dividers.divider1 = dc_fixpt_from_fraction(3, 2); - dividers.divider2 = dc_fixpt_from_int(2); - dividers.divider3 = dc_fixpt_from_fraction(5, 2); - - scale_user_regamma_ramp(rgb_user, &regamma->ramp, dividers); - - if (regamma->flags.bits.applyDegamma == 1) { - apply_degamma_for_user_regamma(rgb_regamma, MAX_HW_POINTS, cal_buffer); - copy_rgb_regamma_to_coordinates_x(coordinates_x, - MAX_HW_POINTS, rgb_regamma); - } - - interpolate_user_regamma(MAX_HW_POINTS, rgb_user, - regamma->flags.bits.applyDegamma, tf_pts); - - // no custom HDR curves! - tf_pts->end_exponent = 0; - tf_pts->x_point_at_y1_red = 1; - tf_pts->x_point_at_y1_green = 1; - tf_pts->x_point_at_y1_blue = 1; - - if (ramp && ramp->type == GAMMA_CS_TFM_1D) - apply_lut_1d(ramp, MAX_HW_POINTS, &output_tf->tf_pts); - - // this function just clamps output to 0-1 - build_new_custom_resulted_curve(MAX_HW_POINTS, tf_pts); - - ret = true; - - kfree(rgb_regamma); -rgb_regamma_alloc_fail: - kfree(rgb_user); -rgb_user_alloc_fail: - return ret; -} - bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps, struct dc_transfer_func *input_tf, const struct dc_gamma *ramp, bool map_user_ramp) diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h index ee5c466613de..97e55278940e 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h @@ -115,15 +115,4 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps, struct dc_transfer_func *output_tf, const struct dc_gamma *ramp, bool mapUserRamp); -bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, - const struct regamma_lut *regamma, - struct calculate_buffer *cal_buffer, - const struct dc_gamma *ramp); - -bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf, - const struct regamma_lut *regamma, - struct calculate_buffer *cal_buffer, - const struct dc_gamma *ramp); - - #endif /* COLOR_MOD_COLOR_GAMMA_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index bbd259cea4f4..2b3964529539 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -48,6 +48,7 @@ #define VSYNCS_BETWEEN_FLIP_THRESHOLD 2 #define FREESYNC_CONSEC_FLIP_AFTER_VSYNC 5 #define FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US 500 +#define MICRO_HZ_TO_HZ(x) (x / 1000000) struct core_freesync { struct mod_freesync public; @@ -121,6 +122,17 @@ static unsigned int calc_duration_in_us_from_v_total( return duration_in_us; } +static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream) +{ + unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total; + + if (stream->ctx->dc->caps.vtotal_limited_by_fp2) { + max_hw_v_total -= stream->timing.v_front_porch + 1; + } + + return max_hw_v_total; +} + unsigned int mod_freesync_calc_v_total_from_refresh( const struct dc_stream_state *stream, unsigned int refresh_in_uhz) { unsigned int v_total; unsigned int frame_duration_in_ns; + if (refresh_in_uhz == 0) + return stream->timing.v_total; 
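The conversion this freesync hunk reworks (continued just below) is v_total = round(frame_duration_ns * pix_clk_kHz / h_total / 10^6), where pix_clk_100hz / 10 is the pixel clock in kHz; at or below the panel's minimum refresh the +500000 rounding term is dropped so a rounded-up vtotal can never stretch vblank past the panel's limit, and the hardware ceiling now comes from calc_max_hardware_v_total() above, which subtracts v_front_porch + 1 when vtotal_limited_by_fp2 is set. A worked check against standard CEA 1080p60 timing:

#include <stdint.h>
#include <stdio.h>

/* Worked example of mod_freesync_calc_v_total_from_refresh()'s math for
 * CEA 1080p60: pix_clk 148.5 MHz, h_total 2200, target refresh 60 Hz. */
int main(void)
{
	uint64_t refresh_uhz = 60000000ull;	/* 60 Hz in microhertz */
	uint64_t pix_clk_100hz = 1485000;	/* 148.5 MHz in 100 Hz units */
	uint64_t h_total = 2200;

	uint64_t frame_ns = (1000000000ull * 1000000) / refresh_uhz;
	/* pix_clk_100hz / 10 is the pixel clock in kHz, so the product is
	 * (pixel clocks per frame) * 10^6; +500000 rounds to nearest line */
	uint64_t v_total = (frame_ns * (pix_clk_100hz / 10) / h_total + 500000)
			   / 1000000;

	printf("v_total = %llu\n", (unsigned long long)v_total);	/* 1125 */
	return 0;
}

The result, 1125 lines, is exactly the nominal total line count of CEA 1080p60, which is the expected round trip.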
+ frame_duration_in_ns = ((unsigned int)(div64_u64((1000000000ULL * 1000000), refresh_in_uhz))); - v_total = div64_u64(div64_u64(((unsigned long long)( - frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), - stream->timing.h_total) + 500000, 1000000); + if (MICRO_HZ_TO_HZ(refresh_in_uhz) <= stream->timing.min_refresh_in_uhz) { + /* When the target refresh rate is the minimum panel refresh rate, + * round down the vtotal value to avoid stretching vblank over + * panel's vtotal boundary. + */ + v_total = div64_u64(div64_u64(((unsigned long long)( + frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), + stream->timing.h_total), 1000000); + } else { + v_total = div64_u64(div64_u64(((unsigned long long)( + frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), + stream->timing.h_total) + 500000, 1000000); + } /* v_total cannot be less than nominal */ if (v_total < stream->timing.v_total) { @@ -1002,7 +1027,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, if (stream->ctx->dc->caps.max_v_total != 0 && stream->timing.h_total != 0) { min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL), - (stream->timing.h_total * (long long)stream->ctx->dc->caps.max_v_total)); + (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream))); } /* Limit minimum refresh rate to what can be supported by hardware */ min_refresh_in_uhz = min_hardware_refresh_in_uhz > in_config->min_refresh_in_uhz ? diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index c996365e84b0..1d41dd58f6bc 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -27,6 +27,11 @@ #include "hdcp.h" +static inline uint16_t get_hdmi_rxstatus_msg_size(const uint8_t rxstatus[2]) +{ + return HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rxstatus[1]) << 8 | rxstatus[0]; +} + static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp *hdcp) { uint8_t is_ready = 0; @@ -35,8 +40,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0; else is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[1]) && - (HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0; + get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus) != 0) ? 1 : 0; return is_ready ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY; } @@ -84,15 +88,13 @@ static inline enum mod_hdcp_status check_link_integrity_failure_dp( static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; - uint16_t size; if (is_dp_hdcp(hdcp)) { status = MOD_HDCP_STATUS_SUCCESS; } else { status = mod_hdcp_read_rxstatus(hdcp); if (status == MOD_HDCP_STATUS_SUCCESS) { - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_cert)) ? 
MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING; @@ -104,7 +106,6 @@ static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; - uint8_t size; status = mod_hdcp_read_rxstatus(hdcp); if (status != MOD_HDCP_STATUS_SUCCESS) @@ -115,8 +116,7 @@ static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp) MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; } else { - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; @@ -128,7 +128,6 @@ out: static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; - uint8_t size; status = mod_hdcp_read_rxstatus(hdcp); if (status != MOD_HDCP_STATUS_SUCCESS) @@ -139,8 +138,7 @@ static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp) MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; } else { - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; @@ -152,7 +150,6 @@ out: static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status = MOD_HDCP_STATUS_FAILURE; - uint8_t size; uint16_t max_wait = 20; // units of ms uint16_t num_polls = 5; uint16_t wait_time = max_wait / num_polls; @@ -167,8 +164,7 @@ static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) if (status != MOD_HDCP_STATUS_SUCCESS) break; - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING; @@ -181,7 +177,6 @@ static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; - uint8_t size; if (is_dp_hdcp(hdcp)) { status = MOD_HDCP_STATUS_INVALID_OPERATION; @@ -189,8 +184,7 @@ static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp) status = mod_hdcp_read_rxstatus(hdcp); if (status != MOD_HDCP_STATUS_SUCCESS) goto out; - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)) ? 
MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING; @@ -249,8 +243,7 @@ static uint8_t process_rxstatus(struct mod_hdcp *hdcp, sizeof(hdcp->auth.msg.hdcp2.rx_id_list); else hdcp->auth.msg.hdcp2.rx_id_list_size = - HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); } out: return (*status == MOD_HDCP_STATUS_SUCCESS); diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 3cd52e7a9c77..95838c7ab054 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -841,6 +841,8 @@ bool is_psr_su_specific_panel(struct dc_link *link) isPSRSUSupported = false; else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03) isPSRSUSupported = false; + else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x01) + isPSRSUSupported = false; else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1) isPSRSUSupported = true; } diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 3f91926a50e9..7eefcb0f5070 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -28,6 +28,8 @@ #define AMD_MAX_USEC_TIMEOUT 1000000 /* 1000 ms */ +struct amdgpu_ip_block; + /* * Chip flags @@ -337,6 +339,11 @@ enum DC_DEBUG_MASK { * @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time. */ DC_FORCE_IPS_ENABLE = 0x4000, + /** + * @DC_DISABLE_ACPI_EDID: If set, don't attempt to fetch EDID for + * eDP display from ACPI _DDC method. + */ + DC_DISABLE_ACPI_EDID = 0x8000, }; enum amd_dpm_forced_level; @@ -377,30 +384,30 @@ enum amd_dpm_forced_level; */ struct amd_ip_funcs { char *name; - int (*early_init)(void *handle); - int (*late_init)(void *handle); - int (*sw_init)(void *handle); - int (*sw_fini)(void *handle); - int (*early_fini)(void *handle); - int (*hw_init)(void *handle); - int (*hw_fini)(void *handle); - void (*late_fini)(void *handle); - int (*prepare_suspend)(void *handle); - int (*suspend)(void *handle); - int (*resume)(void *handle); + int (*early_init)(struct amdgpu_ip_block *ip_block); + int (*late_init)(struct amdgpu_ip_block *ip_block); + int (*sw_init)(struct amdgpu_ip_block *ip_block); + int (*sw_fini)(struct amdgpu_ip_block *ip_block); + int (*early_fini)(struct amdgpu_ip_block *ip_block); + int (*hw_init)(struct amdgpu_ip_block *ip_block); + int (*hw_fini)(struct amdgpu_ip_block *ip_block); + void (*late_fini)(struct amdgpu_ip_block *ip_block); + int (*prepare_suspend)(struct amdgpu_ip_block *ip_block); + int (*suspend)(struct amdgpu_ip_block *ip_block); + int (*resume)(struct amdgpu_ip_block *ip_block); bool (*is_idle)(void *handle); - int (*wait_for_idle)(void *handle); - bool (*check_soft_reset)(void *handle); - int (*pre_soft_reset)(void *handle); - int (*soft_reset)(void *handle); - int (*post_soft_reset)(void *handle); + int (*wait_for_idle)(struct amdgpu_ip_block *ip_block); + bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block); + int (*pre_soft_reset)(struct amdgpu_ip_block *ip_block); + int (*soft_reset)(struct amdgpu_ip_block *ip_block); + int (*post_soft_reset)(struct amdgpu_ip_block *ip_block); int (*set_clockgating_state)(void *handle, enum amd_clockgating_state state); int (*set_powergating_state)(void *handle, enum amd_powergating_state state); 
void (*get_clockgating_state)(void *handle, u64 *flags); - void (*dump_ip_state)(void *handle); - void (*print_ip_state)(void *handle, struct drm_printer *p); + void (*dump_ip_state)(struct amdgpu_ip_block *ip_block); + void (*print_ip_state)(struct amdgpu_ip_block *ip_block, struct drm_printer *p); }; diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h index f42a276499cd..5d9d5fea6e06 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h @@ -6199,10 +6199,12 @@ #define DCHUBBUB_CTRL_STATUS__ROB_UNDERFLOW_STATUS__SHIFT 0x1 #define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_STATUS__SHIFT 0x2 #define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_CLEAR__SHIFT 0x3 +#define DCHUBBUB_CTRL_STATUS__DCHUBBUB_HW_DEBUG__SHIFT 0x4 #define DCHUBBUB_CTRL_STATUS__CSTATE_SWATH_CHK_GOOD_MODE__SHIFT 0x1f #define DCHUBBUB_CTRL_STATUS__ROB_UNDERFLOW_STATUS_MASK 0x00000002L #define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_STATUS_MASK 0x00000004L #define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_CLEAR_MASK 0x00000008L +#define DCHUBBUB_CTRL_STATUS__DCHUBBUB_HW_DEBUG_MASK 0x3FFFFFF0L #define DCHUBBUB_CTRL_STATUS__CSTATE_SWATH_CHK_GOOD_MODE_MASK 0x80000000L //DCHUBBUB_TIMEOUT_DETECTION_CTRL1 #define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS__SHIFT 0x0 diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h index 2c3ce243861a..380e44230bda 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h @@ -1232,6 +1232,29 @@ #define mmMC_VM_MX_L1_PERFCOUNTER_HI 0x059d #define mmMC_VM_MX_L1_PERFCOUNTER_HI_BASE_IDX 0 +// Stand Alone Walker Registers +#define VMC_TAP_PDE_REQUEST_SNOOP_OFFSET 8 +#define VMC_TAP_PTE_REQUEST_SNOOP_OFFSET 11 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x0606 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x0607 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x0608 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x0609 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x060a +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x060b +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_CNTL 0x0604 +#define mmVM_L2_SAW_CONTEXT0_CNTL_BASE_IDX 0 +#define CONTEXT0_CNTL_ENABLE_OFFSET 0 +#define CONTEXT0_CNTL_PAGE_TABLE_DEPTH_OFFSET 1 +#define mmVM_L2_SAW_CONTEXTS_DISABLE 0x060c +#define mmVM_L2_SAW_CONTEXTS_DISABLE_BASE_IDX 0 +#define mmVM_L2_SAW_CNTL4 0x0603 +#define mmVM_L2_SAW_CNTL4_BASE_IDX 0 // addressBlock: mmhub_utcl2_atcl2dec // base address: 0x69900 diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h index 5ebe4cb40f9d..c38a01742d6f 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h @@ -7571,6 +7571,8 @@ // base address: 0x10100000 #define 
regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0 0xd000 #define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_BASE_IDX 5 +#define regRCC_DEV0_EPF5_STRAP4 0xd284 +#define regRCC_DEV0_EPF5_STRAP4_BASE_IDX 5 // addressBlock: nbio_nbif0_bif_rst_bif_rst_regblk diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h index eb8c556d9c93..3b96f1e5a180 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h @@ -50665,6 +50665,19 @@ #define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK 0x40000000L #define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK 0x80000000L +//RCC_DEV0_EPF5_STRAP4 +#define RCC_DEV0_EPF5_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F5__SHIFT 0x14 +#define RCC_DEV0_EPF5_STRAP4__STRAP_ATOMIC_EN_DEV0_F5__SHIFT 0x15 +#define RCC_DEV0_EPF5_STRAP4__STRAP_FLR_EN_DEV0_F5__SHIFT 0x16 +#define RCC_DEV0_EPF5_STRAP4__STRAP_PME_SUPPORT_DEV0_F5__SHIFT 0x17 +#define RCC_DEV0_EPF5_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F5__SHIFT 0x1c +#define RCC_DEV0_EPF5_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F5__SHIFT 0x1f +#define RCC_DEV0_EPF5_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F5_MASK 0x00100000L +#define RCC_DEV0_EPF5_STRAP4__STRAP_ATOMIC_EN_DEV0_F5_MASK 0x00200000L +#define RCC_DEV0_EPF5_STRAP4__STRAP_FLR_EN_DEV0_F5_MASK 0x00400000L +#define RCC_DEV0_EPF5_STRAP4__STRAP_PME_SUPPORT_DEV0_F5_MASK 0x0F800000L +#define RCC_DEV0_EPF5_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F5_MASK 0x70000000L +#define RCC_DEV0_EPF5_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F5_MASK 0x80000000L // addressBlock: nbio_nbif0_bif_rst_bif_rst_regblk //HARD_RST_CTRL diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 19a48d98830a..67a5de573943 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -119,6 +119,8 @@ enum pp_clock_type { OD_ACOUSTIC_TARGET, OD_FAN_TARGET_TEMPERATURE, OD_FAN_MINIMUM_PWM, + OD_FAN_ZERO_RPM_ENABLE, + OD_FAN_ZERO_RPM_STOP_TEMP, }; enum amd_pp_sensors { @@ -199,6 +201,8 @@ enum PP_OD_DPM_TABLE_COMMAND { PP_OD_EDIT_ACOUSTIC_TARGET, PP_OD_EDIT_FAN_TARGET_TEMPERATURE, PP_OD_EDIT_FAN_MINIMUM_PWM, + PP_OD_EDIT_FAN_ZERO_RPM_ENABLE, + PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP, }; struct pp_states_info { @@ -336,7 +340,8 @@ enum pp_policy_soc_pstate { #define MAX_CLKS 4 #define NUM_VCN 4 #define NUM_JPEG_ENG 32 - +#define MAX_XCC 8 +#define NUM_XCP 8 struct seq_file; enum amd_pp_clock_type; struct amd_pp_simple_clock_info; @@ -350,6 +355,26 @@ struct pp_smu_wm_range_sets; struct pp_smu_nv_clock_table; struct dpm_clocks; +struct amdgpu_xcp_metrics { + /* Utilization Instantaneous (%) */ + uint32_t gfx_busy_inst[MAX_XCC]; + uint16_t jpeg_busy[NUM_JPEG_ENG]; + uint16_t vcn_busy[NUM_VCN]; + /* Utilization Accumulated (%) */ + uint64_t gfx_busy_acc[MAX_XCC]; +}; + +struct amdgpu_xcp_metrics_v1_1 { + /* Utilization Instantaneous (%) */ + uint32_t gfx_busy_inst[MAX_XCC]; + uint16_t jpeg_busy[NUM_JPEG_ENG]; + uint16_t vcn_busy[NUM_VCN]; + /* Utilization Accumulated (%) */ + uint64_t gfx_busy_acc[MAX_XCC]; + /* Total App Clock Counter Accumulated */ + uint64_t gfx_below_host_limit_acc[MAX_XCC]; +}; + struct amd_pm_funcs { /* export for dpm on ci and si */ int (*pre_set_power_state)(void *handle); @@ -872,6 +897,196 @@ struct gpu_metrics_v1_5 { uint16_t padding; }; +struct gpu_metrics_v1_6 { + struct metrics_table_header common_header; + + /* Temperature (Celsius) 
*/
+ uint16_t temperature_hotspot;
+ uint16_t temperature_mem;
+ uint16_t temperature_vrsoc;
+
+ /* Power (Watts) */
+ uint16_t curr_socket_power;
+
+ /* Utilization (%) */
+ uint16_t average_gfx_activity;
+ uint16_t average_umc_activity; // memory controller
+
+ /* Energy (15.259uJ (2^-16) units) */
+ uint64_t energy_accumulator;
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Accumulation cycle counter */
+ uint32_t accumulation_counter;
+
+ /* Accumulated throttler residencies */
+ uint32_t prochot_residency_acc;
+ uint32_t ppt_residency_acc;
+ uint32_t socket_thm_residency_acc;
+ uint32_t vr_thm_residency_acc;
+ uint32_t hbm_thm_residency_acc;
+
+ /* Clock Lock Status. Each bit corresponds to clock instance */
+ uint32_t gfxclk_lock_status;
+
+ /* Link width (number of lanes) and speed (in 0.1 GT/s) */
+ uint16_t pcie_link_width;
+ uint16_t pcie_link_speed;
+
+ /* XGMI bus width and bitrate (in Gbps) */
+ uint16_t xgmi_link_width;
+ uint16_t xgmi_link_speed;
+
+ /* Utilization Accumulated (%) */
+ uint32_t gfx_activity_acc;
+ uint32_t mem_activity_acc;
+
+ /*PCIE accumulated bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_acc;
+
+ /*PCIE instantaneous bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_inst;
+
+ /* PCIE L0 to recovery state transition accumulated count */
+ uint64_t pcie_l0_to_recov_count_acc;
+
+ /* PCIE replay accumulated count */
+ uint64_t pcie_replay_count_acc;
+
+ /* PCIE replay rollover accumulated count */
+ uint64_t pcie_replay_rover_count_acc;
+
+ /* PCIE NAK sent accumulated count */
+ uint32_t pcie_nak_sent_count_acc;
+
+ /* PCIE NAK received accumulated count */
+ uint32_t pcie_nak_rcvd_count_acc;
+
+ /* XGMI accumulated data transfer size(KiloBytes) */
+ uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS];
+ uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS];
+
+ /* PMFW attached timestamp (10ns resolution) */
+ uint64_t firmware_timestamp;
+
+ /* Current clocks (Mhz) */
+ uint16_t current_gfxclk[MAX_GFX_CLKS];
+ uint16_t current_socclk[MAX_CLKS];
+ uint16_t current_vclk0[MAX_CLKS];
+ uint16_t current_dclk0[MAX_CLKS];
+ uint16_t current_uclk;
+
+ /* Number of current partition */
+ uint16_t num_partition;
+
+ /* XCP metrics stats */
+ struct amdgpu_xcp_metrics xcp_stats[NUM_XCP];
+
+ /* PCIE other end recovery counter */
+ uint32_t pcie_lc_perf_other_end_recovery;
+};
+
+struct gpu_metrics_v1_7 {
+ struct metrics_table_header common_header;
+
+ /* Temperature (Celsius) */
+ uint16_t temperature_hotspot;
+ uint16_t temperature_mem;
+ uint16_t temperature_vrsoc;
+
+ /* Power (Watts) */
+ uint16_t curr_socket_power;
+
+ /* Utilization (%) */
+ uint16_t average_gfx_activity;
+ uint16_t average_umc_activity; // memory controller
+
+ /* VRAM max bandwidth (in GB/sec) at max memory clock */
+ uint64_t mem_max_bandwidth;
+
+ /* Energy (15.259uJ (2^-16) units) */
+ uint64_t energy_accumulator;
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Accumulation cycle counter */
+ uint32_t accumulation_counter;
+
+ /* Accumulated throttler residencies */
+ uint32_t prochot_residency_acc;
+ uint32_t ppt_residency_acc;
+ uint32_t socket_thm_residency_acc;
+ uint32_t vr_thm_residency_acc;
+ uint32_t hbm_thm_residency_acc;
+
+ /* Clock Lock Status.
Each bit corresponds to clock instance */ + uint32_t gfxclk_lock_status; + + /* Link width (number of lanes) and speed (in 0.1 GT/s) */ + uint16_t pcie_link_width; + uint16_t pcie_link_speed; + + /* XGMI bus width and bitrate (in Gbps) */ + uint16_t xgmi_link_width; + uint16_t xgmi_link_speed; + + /* Utilization Accumulated (%) */ + uint32_t gfx_activity_acc; + uint32_t mem_activity_acc; + + /*PCIE accumulated bandwidth (GB/sec) */ + uint64_t pcie_bandwidth_acc; + + /*PCIE instantaneous bandwidth (GB/sec) */ + uint64_t pcie_bandwidth_inst; + + /* PCIE L0 to recovery state transition accumulated count */ + uint64_t pcie_l0_to_recov_count_acc; + + /* PCIE replay accumulated count */ + uint64_t pcie_replay_count_acc; + + /* PCIE replay rollover accumulated count */ + uint64_t pcie_replay_rover_count_acc; + + /* PCIE NAK sent accumulated count */ + uint32_t pcie_nak_sent_count_acc; + + /* PCIE NAK received accumulated count */ + uint32_t pcie_nak_rcvd_count_acc; + + /* XGMI accumulated data transfer size(KiloBytes) */ + uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS]; + uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS]; + + /* XGMI link status(active/inactive) */ + uint16_t xgmi_link_status[NUM_XGMI_LINKS]; + + uint16_t padding; + + /* PMFW attached timestamp (10ns resolution) */ + uint64_t firmware_timestamp; + + /* Current clocks (Mhz) */ + uint16_t current_gfxclk[MAX_GFX_CLKS]; + uint16_t current_socclk[MAX_CLKS]; + uint16_t current_vclk0[MAX_CLKS]; + uint16_t current_dclk0[MAX_CLKS]; + uint16_t current_uclk; + + /* Number of current partition */ + uint16_t num_partition; + + /* XCP metrics stats */ + struct amdgpu_xcp_metrics_v1_1 xcp_stats[NUM_XCP]; + + /* PCIE other end recovery counter */ + uint32_t pcie_lc_perf_other_end_recovery; +}; + /* * gpu_metrics_v2_0 is not recommended as it's not naturally aligned. * Use gpu_metrics_v2_1 or later instead. 
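These gpu_metrics tables are handed to userspace as raw bytes, which is why the comment above deprecates v2_0: every member must land on an offset that is a multiple of its own size, without relying on compiler-inserted padding. That is also why v1_7 carries an explicit uint16_t padding after the odd run of 16-bit xgmi_link_status words. A small self-contained illustration of the "naturally aligned" rule, using hypothetical structs rather than tables from this header:

#include <stddef.h>
#include <stdint.h>

/* Not naturally aligned: the compiler must insert six hidden bytes
 * before 'ts', so the wire layout depends on implicit padding. */
struct metrics_unaligned {
	uint16_t status;
	uint64_t ts;
};

/* Naturally aligned: the padding is explicit, every offset is a
 * multiple of the member's size, and the layout is stable across
 * compilers and ABIs. */
struct metrics_aligned {
	uint16_t status[3];
	uint16_t padding;
	uint64_t ts;	/* offset 8, an exact multiple of sizeof(uint64_t) */
};

_Static_assert(offsetof(struct metrics_aligned, ts) % sizeof(uint64_t) == 0,
	       "ts must sit on an 8-byte boundary");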
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index 21ceafce1f9b..eb46cb10c24d 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -230,13 +230,23 @@ union MESAPI_SET_HW_RESOURCES {
 uint32_t disable_add_queue_wptr_mc_addr : 1;
 uint32_t enable_mes_event_int_logging : 1;
 uint32_t enable_reg_active_poll : 1;
- uint32_t reserved : 21;
+ uint32_t use_disable_queue_in_legacy_uq_preemption : 1;
+ uint32_t send_write_data : 1;
+ uint32_t os_tdr_timeout_override : 1;
+ uint32_t use_rs64mem_for_proc_gang_ctx : 1;
+ uint32_t use_add_queue_unmap_flag_addr : 1;
+ uint32_t enable_mes_sch_stb_log : 1;
+ uint32_t limit_single_process : 1;
+ uint32_t is_strix_tmz_wa_enabled : 1;
+ uint32_t reserved : 13;
 };
 uint32_t uint32_t_all;
 };
 uint32_t oversubscription_timer;
 uint64_t doorbell_info;
 uint64_t event_intr_history_gpu_mc_ptr;
+ uint64_t timestamp;
+ uint32_t os_tdr_timeout_in_sec;
 };
 uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -563,6 +573,11 @@ enum MESAPI_MISC_OPCODE {
 MESAPI_MISC__READ_REG,
 MESAPI_MISC__WAIT_REG_MEM,
 MESAPI_MISC__SET_SHADER_DEBUGGER,
+ MESAPI_MISC__NOTIFY_WORK_ON_UNMAPPED_QUEUE,
+ MESAPI_MISC__NOTIFY_TO_UNMAP_PROCESSES,
+ MESAPI_MISC__CHANGE_CONFIG,
+ MESAPI_MISC__LAUNCH_CLEANER_SHADER,
+ MESAPI_MISC__MAX,
 };
@@ -617,6 +632,31 @@ struct SET_SHADER_DEBUGGER {
 uint32_t trap_en;
 };
+enum MESAPI_MISC__CHANGE_CONFIG_OPTION {
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS = 0,
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_ENABLE_HWS_LOGGING_BUFFER = 1,
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_CHANGE_TDR_CONFIG = 2,
+
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_MAX = 0x1F
+};
+
+struct CHANGE_CONFIG {
+ enum MESAPI_MISC__CHANGE_CONFIG_OPTION opcode;
+ union {
+ struct {
+ uint32_t limit_single_process : 1;
+ uint32_t enable_hws_logging_buffer : 1;
+ uint32_t reserved : 30;
+ } bits;
+ uint32_t all;
+ } option;
+
+ struct {
+ uint32_t tdr_level;
+ uint32_t tdr_delay;
+ } tdr_config;
+};
+
 union MESAPI__MISC {
 struct {
 union MES_API_HEADER header;
@@ -631,6 +671,7 @@ union MESAPI__MISC {
 struct WAIT_REG_MEM wait_reg_mem;
 struct SET_SHADER_DEBUGGER set_shader_debugger;
 enum MES_AMD_PRIORITY_LEVEL queue_sch_level;
+ struct CHANGE_CONFIG change_config;
 uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS];
 };
diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
index 101e2fe962c6..c9b2ca5cf75f 100644
--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
@@ -643,6 +643,10 @@ enum MESAPI_MISC_OPCODE {
 MESAPI_MISC__SET_SHADER_DEBUGGER,
 MESAPI_MISC__NOTIFY_WORK_ON_UNMAPPED_QUEUE,
 MESAPI_MISC__NOTIFY_TO_UNMAP_PROCESSES,
+ MESAPI_MISC__QUERY_HUNG_ENGINE_ID,
+ MESAPI_MISC__CHANGE_CONFIG,
+ MESAPI_MISC__LAUNCH_CLEANER_SHADER,
+ MESAPI_MISC__SETUP_MES_DBGEXT,
 MESAPI_MISC__MAX,
 };
@@ -713,6 +717,31 @@ struct SET_GANG_SUBMIT {
 uint32_t slave_gang_context_array_index;
 };
+enum MESAPI_MISC__CHANGE_CONFIG_OPTION {
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS = 0,
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_ENABLE_HWS_LOGGING_BUFFER = 1,
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_CHANGE_TDR_CONFIG = 2,
+
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_MAX = 0x1F
+};
+
+struct CHANGE_CONFIG {
+ enum MESAPI_MISC__CHANGE_CONFIG_OPTION opcode;
+ union {
+ struct {
+ uint32_t limit_single_process : 1;
+ uint32_t enable_hws_logging_buffer : 1;
+ uint32_t reserved : 30;
+ } bits;
+ uint32_t all;
+ } option;
+
+ struct {
uint32_t tdr_level; + uint32_t tdr_delay; + } tdr_config; +}; + union MESAPI__MISC { struct { union MES_API_HEADER header; @@ -726,7 +755,7 @@ union MESAPI__MISC { struct WAIT_REG_MEM wait_reg_mem; struct SET_SHADER_DEBUGGER set_shader_debugger; enum MES_AMD_PRIORITY_LEVEL queue_sch_level; - + struct CHANGE_CONFIG change_config; uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS]; }; uint64_t timestamp; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index d5d6ab484e5a..e8ae7681bf0a 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -145,15 +145,12 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; amdgpu_dpm_get_current_power_state(adev, &pm); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return sysfs_emit(buf, "%s\n", @@ -185,11 +182,9 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev, else return -EINVAL; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } amdgpu_dpm_set_power_state(adev, state); @@ -273,15 +268,12 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; level = amdgpu_dpm_get_performance_level(adev); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return sysfs_emit(buf, "%s\n", @@ -336,11 +328,9 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, return -EINVAL; } - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } mutex_lock(&adev->pm.stable_pstate_ctx_lock); if (amdgpu_dpm_force_performance_level(adev, level)) { @@ -374,16 +364,13 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; if (amdgpu_dpm_get_pp_num_states(adev, &data)) memset(&data, 0, sizeof(data)); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); buf_len = sysfs_emit(buf, "states: %d\n", data.nums); @@ -412,17 +399,14 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; amdgpu_dpm_get_current_power_state(adev, &pm); ret = amdgpu_dpm_get_pp_num_states(adev, &data); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); if (ret) @@ -485,11 +469,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); - ret = 
pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_get_pp_num_states(adev, &data); if (ret) @@ -544,15 +526,12 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_pp_table(adev, &table); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); if (size <= 0) @@ -580,11 +559,9 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_pp_table(adev, buf, count); @@ -808,11 +785,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, tmp_str++; } - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } if (amdgpu_dpm_set_fine_grain_clk_vol(adev, type, @@ -865,11 +840,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; for (clk_index = 0 ; clk_index < 6 ; clk_index++) { ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size); @@ -888,7 +861,6 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, if (size == 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -929,11 +901,9 @@ static ssize_t amdgpu_set_pp_features(struct device *dev, if (ret) return -EINVAL; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); @@ -960,17 +930,14 @@ static ssize_t amdgpu_get_pp_features(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_ppfeature_status(adev, buf); if (size <= 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -1029,11 +996,9 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size); if (ret == -ENOENT) @@ -1042,7 +1007,6 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev, if (size == 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); 
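The long run of sysfs conversions in this file follows one pattern: read-only handlers switch from pm_runtime_get_sync() to pm_runtime_get_if_active(), which takes a reference only if the GPU is already awake (a return of 0 is mapped to -EPERM rather than resuming the device), while write handlers switch to pm_runtime_resume_and_get(), which wakes the device and drops the usage count itself on failure. A condensed sketch of the two shapes; query_hw() and apply_hw() are hypothetical stand-ins for the amdgpu_dpm_* calls:

#include <linux/pm_runtime.h>

ssize_t query_hw(struct device *dev, char *buf);	/* hypothetical */
int apply_hw(struct device *dev, const char *buf);	/* hypothetical */

/* Read path: never power the GPU up just to print a value. */
static ssize_t show_attr(struct device *dev, char *buf)
{
	ssize_t ret = pm_runtime_get_if_active(dev);

	if (ret <= 0)
		return ret ?: -EPERM;	/* 0: suspended, refuse to wake */

	ret = query_hw(dev, buf);

	pm_runtime_put_autosuspend(dev);
	return ret;
}

/* Write path: the user requested a change, so resuming is justified. */
static ssize_t store_attr(struct device *dev, const char *buf, size_t count)
{
	int ret = pm_runtime_resume_and_get(dev);

	if (ret < 0)	/* usage count already dropped on failure */
		return ret;

	ret = apply_hw(dev, buf);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return ret ?: count;
}

This is also why the converted error paths below lose their extra pm_runtime_put_autosuspend() calls: neither primitive leaves a dangling reference when it fails.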
return size;
@@ -1102,11 +1066,9 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
 if (ret)
 return ret;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
 return ret;
- }
 ret = amdgpu_dpm_force_clock_level(adev, type, mask);
@@ -1283,15 +1245,12 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 if (adev->in_suspend && !adev->in_runpm)
 return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
 value = amdgpu_dpm_get_sclk_od(adev);
- pm_runtime_mark_last_busy(ddev->dev);
 pm_runtime_put_autosuspend(ddev->dev);
 return sysfs_emit(buf, "%d\n", value);
@@ -1317,11 +1276,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 if (ret)
 return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
 return ret;
- }
 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
@@ -1345,15 +1302,12 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 if (adev->in_suspend && !adev->in_runpm)
 return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
 value = amdgpu_dpm_get_mclk_od(adev);
- pm_runtime_mark_last_busy(ddev->dev);
 pm_runtime_put_autosuspend(ddev->dev);
 return sysfs_emit(buf, "%d\n", value);
@@ -1379,11 +1333,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 if (ret)
 return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
 return ret;
- }
 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
@@ -1409,7 +1361,11 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter. Due to differences across asic families
- * the heuristic parameters vary from family to family.
+ * the heuristic parameters vary from family to family. Additionally,
+ * you can apply the custom heuristics to different clock domains. Each
+ * clock domain is considered a distinct operation, so if you modify the
+ * gfxclk heuristics and then the memclk heuristics, all of the
+ * custom heuristics will be retained until you switch to another profile.
* */ @@ -1427,17 +1383,14 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_power_profile_mode(adev, buf); if (size <= 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -1492,11 +1445,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, } parameter[parameter_size] = profile_mode; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); @@ -1520,16 +1471,13 @@ static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (r < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return r; - } + r = pm_runtime_get_if_active(adev->dev); + if (r <= 0) + return r ?: -EPERM; /* get the sensor value */ r = amdgpu_dpm_read_sensor(adev, sensor, query, &size); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1639,15 +1587,12 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev, if (!adev->asic_funcs->get_pcie_usage) return -ENODATA; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; amdgpu_asic_get_pcie_usage(adev, &count0, &count1); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return sysfs_emit(buf, "%llu %llu %i\n", @@ -1770,11 +1715,9 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit); if (!ret) @@ -1782,7 +1725,6 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev, else size = sysfs_emit(buf, "failed to get thermal limit\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -1807,14 +1749,14 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev, return -EINVAL; } - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_apu_thermal_limit(adev, value); if (ret) { + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); dev_err(dev, "failed to update thermal limit\n"); return ret; } @@ -1849,15 +1791,12 @@ static ssize_t amdgpu_get_pm_metrics(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_pm_metrics(adev, buf, 
PAGE_SIZE); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -1890,11 +1829,9 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics); if (size <= 0) @@ -1906,7 +1843,6 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev, memcpy(buf, gpu_metrics, size); out: - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -2008,11 +1944,9 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - r = pm_runtime_get_sync(ddev->dev); - if (r < 0) { - pm_runtime_put_autosuspend(ddev->dev); + r = pm_runtime_resume_and_get(ddev->dev); + if (r < 0) return r; - } r = kstrtoint(buf, 10, &bias); if (r) @@ -2335,11 +2269,9 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev, policy_attr = container_of(attr, struct amdgpu_pm_policy_attr, dev_attr); - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val); @@ -2772,15 +2704,12 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return ret; - } + ret = pm_runtime_get_if_active(adev->dev); + if (ret <= 0) + return ret ?: -EPERM; ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (ret) @@ -2817,11 +2746,9 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, else return -EINVAL; - ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + ret = pm_runtime_resume_and_get(adev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); @@ -2866,11 +2793,9 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, if (err) return err; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = pm_runtime_resume_and_get(adev->dev); + if (err < 0) return err; - } err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); if (err) @@ -2907,15 +2832,12 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return err; - } + err = pm_runtime_get_if_active(adev->dev); + if (err <= 0) + return err ?: -EPERM; err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) @@ -2937,15 +2859,12 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return err; - } + err = 
pm_runtime_get_if_active(adev->dev); + if (err <= 0) + return err ?: -EPERM; err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) @@ -3001,15 +2920,12 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return err; - } + err = pm_runtime_get_if_active(adev->dev); + if (err <= 0) + return err ?: -EPERM; err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) @@ -3036,11 +2952,9 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, if (err) return err; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = pm_runtime_resume_and_get(adev->dev); + if (err < 0) return err; - } err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); if (err) @@ -3076,15 +2990,12 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return ret; - } + ret = pm_runtime_get_if_active(adev->dev); + if (ret <= 0) + return ret ?: -EPERM; ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (ret) @@ -3119,11 +3030,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, else return -EINVAL; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = pm_runtime_resume_and_get(adev->dev); + if (err < 0) return err; - } err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); @@ -3248,11 +3157,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (r < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return r; - } + r = pm_runtime_get_if_active(adev->dev); + if (r <= 0) + return r ?: -EPERM; r = amdgpu_dpm_get_power_limit(adev, &limit, pp_limit_level, power_type); @@ -3262,7 +3169,6 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, else size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return size; @@ -3339,11 +3245,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, value = value / 1000000; /* convert to Watt */ value |= limit_type << 24; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = pm_runtime_resume_and_get(adev->dev); + if (err < 0) return err; - } err = amdgpu_dpm_set_power_limit(adev, value); @@ -3787,17 +3691,14 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(adev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev->dev); - return ret; - } + ret = pm_runtime_get_if_active(adev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_print_clock_levels(adev, od_type, buf); if 
(size == 0)
 size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(adev->dev);
 pm_runtime_put_autosuspend(adev->dev);
 return size;
@@ -3879,23 +3780,23 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
 if (ret)
 return ret;
- ret = pm_runtime_get_sync(adev->dev);
+ ret = pm_runtime_resume_and_get(adev->dev);
 if (ret < 0)
- goto err_out0;
+ return ret;
 ret = amdgpu_dpm_odn_edit_dpm_table(adev, cmd_type,
 parameter,
 parameter_size);
 if (ret)
- goto err_out1;
+ goto err_out;
 if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
 ret = amdgpu_dpm_dispatch_task(adev,
 AMD_PP_TASK_READJUST_POWER_STATE,
 NULL);
 if (ret)
- goto err_out1;
+ goto err_out;
 }
 pm_runtime_mark_last_busy(adev->dev);
@@ -3903,9 +3804,8 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
 return count;
-err_out1:
+err_out:
 pm_runtime_mark_last_busy(adev->dev);
-err_out0:
 pm_runtime_put_autosuspend(adev->dev);
 return ret;
@@ -4213,6 +4113,117 @@ static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
 return umode;
 }
+/**
+ * DOC: fan_zero_rpm_enable
+ *
+ * The amdgpu driver provides a sysfs API for checking and adjusting the
+ * zero RPM feature.
+ *
+ * Reading back the file shows you the current setting and the permitted
+ * ranges if changeable.
+ *
+ * Writing an integer to the file changes the setting accordingly.
+ *
+ * When you have finished editing, write "c" (commit) to the file to commit
+ * your changes.
+ *
+ * If you want to reset to the default value, write "r" (reset) to the file to
+ * reset it.
+ */
+static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf);
+}
+
+static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
+ PP_OD_EDIT_FAN_ZERO_RPM_ENABLE,
+ buf,
+ count);
+}
+
+static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev)
+{
+ umode_t umode = 0000;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE)
+ umode |= S_IRUSR | S_IRGRP | S_IROTH;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET)
+ umode |= S_IWUSR;
+
+ return umode;
+}
+
+/**
+ * DOC: fan_zero_rpm_stop_temperature
+ *
+ * The amdgpu driver provides a sysfs API for checking and adjusting the
+ * zero RPM stop temperature feature.
+ *
+ * Reading back the file shows you the current setting and the permitted
+ * ranges if changeable.
+ *
+ * Writing an integer to the file changes the setting accordingly.
+ *
+ * When you have finished editing, write "c" (commit) to the file to commit
+ * your changes.
+ *
+ * If you want to reset to the default value, write "r" (reset) to the file to
+ * reset it.
+ *
+ * This setting works only if the Zero RPM setting is enabled. It adjusts the
+ * temperature below which the fan can stop.
+ */ +static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); + struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; + + return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf); +} + +static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t count) +{ + struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); + struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; + + return (ssize_t)amdgpu_distribute_custom_od_settings(adev, + PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP, + buf, + count); +} + +static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev) +{ + umode_t umode = 0000; + + if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE) + umode |= S_IRUSR | S_IRGRP | S_IROTH; + + if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET) + umode |= S_IWUSR; + + return umode; +} + static struct od_feature_set amdgpu_od_set = { .containers = { [0] = { @@ -4258,6 +4269,22 @@ static struct od_feature_set amdgpu_od_set = { .store = fan_minimum_pwm_store, }, }, + [5] = { + .name = "fan_zero_rpm_enable", + .ops = { + .is_visible = fan_zero_rpm_enable_visible, + .show = fan_zero_rpm_enable_show, + .store = fan_zero_rpm_enable_store, + }, + }, + [6] = { + .name = "fan_zero_rpm_stop_temperature", + .ops = { + .is_visible = fan_zero_rpm_stop_temp_visible, + .show = fan_zero_rpm_stop_temp_show, + .store = fan_zero_rpm_stop_temp_store, + }, + }, }, }, }, @@ -4758,11 +4785,9 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) if (adev->in_suspend && !adev->in_runpm) return -EPERM; - r = pm_runtime_get_sync(dev->dev); - if (r < 0) { - pm_runtime_put_autosuspend(dev->dev); + r = pm_runtime_resume_and_get(dev->dev); + if (r < 0) return r; - } if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) { r = amdgpu_debugfs_pm_info_pp(m, adev); @@ -4777,7 +4802,6 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) seq_printf(m, "\n"); out: - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return r; diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index f5bf41f21c41..363af8990aa2 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -328,6 +328,10 @@ struct config_table_setting #define OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET BIT(7) #define OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE BIT(8) #define OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET BIT(9) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE BIT(10) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET BIT(11) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE BIT(12) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET BIT(13) struct amdgpu_pm { struct mutex mutex; diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index e8b6989a40f3..8908646ad620 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -2954,9 +2954,9 @@ static int kv_dpm_get_temp(void *handle) return actual_temp; } -static int kv_dpm_early_init(void *handle) +static int kv_dpm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = 
ip_block->adev; adev->powerplay.pp_funcs = &kv_dpm_funcs; adev->powerplay.pp_handle = adev; @@ -2965,10 +2965,10 @@ static int kv_dpm_early_init(void *handle) return 0; } -static int kv_dpm_late_init(void *handle) +static int kv_dpm_late_init(struct amdgpu_ip_block *ip_block) { /* powerdown unused blocks for now */ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->pm.dpm_enabled) return 0; @@ -2979,11 +2979,10 @@ static int kv_dpm_late_init(void *handle) return 0; } -static int kv_dpm_sw_init(void *handle) +static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - + struct amdgpu_device *adev = ip_block->adev; ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); if (ret) @@ -3024,9 +3023,9 @@ dpm_failed: return ret; } -static int kv_dpm_sw_fini(void *handle) +static int kv_dpm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; flush_work(&adev->pm.dpm.thermal.work); @@ -3035,10 +3034,10 @@ static int kv_dpm_sw_fini(void *handle) return 0; } -static int kv_dpm_hw_init(void *handle) +static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!amdgpu_dpm) return 0; @@ -3053,9 +3052,9 @@ static int kv_dpm_hw_init(void *handle) return ret; } -static int kv_dpm_hw_fini(void *handle) +static int kv_dpm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) kv_dpm_disable(adev); @@ -3063,9 +3062,9 @@ static int kv_dpm_hw_fini(void *handle) return 0; } -static int kv_dpm_suspend(void *handle) +static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) { /* disable dpm */ @@ -3076,10 +3075,10 @@ static int kv_dpm_suspend(void *handle) return 0; } -static int kv_dpm_resume(void *handle) +static int kv_dpm_resume(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ @@ -3100,17 +3099,6 @@ static bool kv_dpm_is_idle(void *handle) return true; } -static int kv_dpm_wait_for_idle(void *handle) -{ - return 0; -} - - -static int kv_dpm_soft_reset(void *handle) -{ - return 0; -} - static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -3314,12 +3302,8 @@ static const struct amd_ip_funcs kv_dpm_ip_funcs = { .suspend = kv_dpm_suspend, .resume = kv_dpm_resume, .is_idle = kv_dpm_is_idle, - .wait_for_idle = kv_dpm_wait_for_idle, - .soft_reset = kv_dpm_soft_reset, .set_clockgating_state = kv_dpm_set_clockgating_state, .set_powergating_state = kv_dpm_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version kv_smu_ip_block = { diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index a1baa13ab2c2..ee23a0f897c5 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ 
-4755,13 +4755,15 @@ static int si_populate_memory_timing_parameters(struct amdgpu_device *adev, u32 dram_timing; u32 dram_timing2; u32 burst_time; + int ret; arb_regs->mc_arb_rfsh_rate = (u8)si_calculate_memory_refresh_rate(adev, pl->sclk); - amdgpu_atombios_set_engine_dram_timings(adev, - pl->sclk, - pl->mclk); + ret = amdgpu_atombios_set_engine_dram_timings(adev, pl->sclk, + pl->mclk); + if (ret) + return ret; dram_timing = RREG32(MC_ARB_DRAM_TIMING); dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); @@ -7619,10 +7621,10 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev, return 0; } -static int si_dpm_late_init(void *handle) +static int si_dpm_late_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->pm.dpm_enabled) return 0; @@ -7716,10 +7718,10 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev) return err; } -static int si_dpm_sw_init(void *handle) +static int si_dpm_sw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); if (ret) @@ -7763,9 +7765,9 @@ dpm_failed: return ret; } -static int si_dpm_sw_fini(void *handle) +static int si_dpm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; flush_work(&adev->pm.dpm.thermal.work); @@ -7774,11 +7776,11 @@ static int si_dpm_sw_fini(void *handle) return 0; } -static int si_dpm_hw_init(void *handle) +static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!amdgpu_dpm) return 0; @@ -7793,9 +7795,9 @@ static int si_dpm_hw_init(void *handle) return ret; } -static int si_dpm_hw_fini(void *handle) +static int si_dpm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) si_dpm_disable(adev); @@ -7803,9 +7805,9 @@ static int si_dpm_hw_fini(void *handle) return 0; } -static int si_dpm_suspend(void *handle) +static int si_dpm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) { /* disable dpm */ @@ -7816,10 +7818,10 @@ static int si_dpm_suspend(void *handle) return 0; } -static int si_dpm_resume(void *handle) +static int si_dpm_resume(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ @@ -7841,17 +7843,12 @@ static bool si_dpm_is_idle(void *handle) return true; } -static int si_dpm_wait_for_idle(void *handle) +static int si_dpm_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* XXX */ return 0; } -static int si_dpm_soft_reset(void *handle) -{ - return 0; -} - static int si_dpm_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -7928,10 +7925,10 @@ static void si_dpm_print_power_state(void *handle, amdgpu_dpm_print_ps_status(adev, rps); } -static int si_dpm_early_init(void *handle) +static int si_dpm_early_init(struct amdgpu_ip_block *ip_block) 
{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->powerplay.pp_funcs = &si_dpm_funcs; adev->powerplay.pp_handle = adev; @@ -8047,11 +8044,8 @@ static const struct amd_ip_funcs si_dpm_ip_funcs = { .resume = si_dpm_resume, .is_idle = si_dpm_is_idle, .wait_for_idle = si_dpm_wait_for_idle, - .soft_reset = si_dpm_soft_reset, .set_clockgating_state = si_dpm_set_clockgating_state, .set_powergating_state = si_dpm_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version si_smu_ip_block = diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index a71c6117d7e5..26624a716fc6 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -75,11 +75,10 @@ static void amd_powerplay_destroy(struct amdgpu_device *adev) hwmgr = NULL; } -static int pp_early_init(void *handle) +static int pp_early_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = handle; - + struct amdgpu_device *adev = ip_block->adev; ret = amd_powerplay_create(adev); if (ret != 0) @@ -131,9 +130,9 @@ static void pp_swctf_delayed_work_handler(struct work_struct *work) orderly_poweroff(true); } -static int pp_sw_init(void *handle) +static int pp_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; int ret = 0; @@ -148,9 +147,9 @@ static int pp_sw_init(void *handle) return ret; } -static int pp_sw_fini(void *handle) +static int pp_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; hwmgr_sw_fini(hwmgr); @@ -160,10 +159,10 @@ static int pp_sw_fini(void *handle) return 0; } -static int pp_hw_init(void *handle) +static int pp_hw_init(struct amdgpu_ip_block *ip_block) { int ret = 0; - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; ret = hwmgr_hw_init(hwmgr); @@ -174,10 +173,9 @@ static int pp_hw_init(void *handle) return ret; } -static int pp_hw_fini(void *handle) +static int pp_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle; cancel_delayed_work_sync(&hwmgr->swctf_delayed_work); @@ -217,9 +215,9 @@ static void pp_reserve_vram_for_smu(struct amdgpu_device *adev) } } -static int pp_late_init(void *handle) +static int pp_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; if (hwmgr && hwmgr->pm_en) @@ -231,9 +229,9 @@ static int pp_late_init(void *handle) return 0; } -static void pp_late_fini(void *handle) +static void pp_late_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.smu_prv_buffer) amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL); @@ -246,25 +244,15 @@ static bool pp_is_idle(void *handle) return false; } -static int pp_wait_for_idle(void *handle) -{ - return 0; -} - -static int pp_sw_reset(void *handle) -{ - return 0; -} - static int 
pp_set_powergating_state(void *handle, enum amd_powergating_state state) { return 0; } -static int pp_suspend(void *handle) +static int pp_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; cancel_delayed_work_sync(&hwmgr->swctf_delayed_work); @@ -272,10 +260,9 @@ static int pp_suspend(void *handle) return hwmgr_suspend(hwmgr); } -static int pp_resume(void *handle) +static int pp_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle; return hwmgr_resume(hwmgr); } @@ -298,12 +285,8 @@ static const struct amd_ip_funcs pp_ip_funcs = { .suspend = pp_suspend, .resume = pp_resume, .is_idle = pp_is_idle, - .wait_for_idle = pp_wait_for_idle, - .soft_reset = pp_sw_reset, .set_clockgating_state = pp_set_clockgating_state, .set_powergating_state = pp_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version pp_smu_ip_block = diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c index b56298d9da98..fe24219c3bf4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c @@ -28,7 +28,6 @@ #include "ppatomctrl.h" #include "atombios.h" #include "cgs_common.h" -#include "ppevvmath.h" #define MEM_ID_MASK 0xff000000 #define MEM_ID_SHIFT 24 @@ -677,433 +676,6 @@ bool atomctrl_get_pp_assign_pin( return bRet; } -int atomctrl_calculate_voltage_evv_on_sclk( - struct pp_hwmgr *hwmgr, - uint8_t voltage_type, - uint32_t sclk, - uint16_t virtual_voltage_Id, - uint16_t *voltage, - uint16_t dpm_level, - bool debug) -{ - ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo; - struct amdgpu_device *adev = hwmgr->adev; - EFUSE_LINEAR_FUNC_PARAM sRO_fuse; - EFUSE_LINEAR_FUNC_PARAM sCACm_fuse; - EFUSE_LINEAR_FUNC_PARAM sCACb_fuse; - EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse; - EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse; - EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse; - EFUSE_INPUT_PARAMETER sInput_FuseValues; - READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues; - - uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused; - fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7; - fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma; - fInt fLkg_FT, repeat; - fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX; - fInt fRLL_LoadLine, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin; - fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM; - fInt fSclk_margin, fSclk, fEVV_V; - fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL; - uint32_t ul_FT_Lkg_V0NORM; - fInt fLn_MaxDivMin, fMin, fAverage, fRange; - fInt fRoots[2]; - fInt fStepSize = GetScaledFraction(625, 100000); - - int result; - - getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *) - smu_atom_get_data_table(hwmgr->adev, - GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), - NULL, NULL, NULL); - - if (!getASICProfilingInfo) - return -1; - - if 
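The pp_wait_for_idle() and pp_sw_reset() stubs deleted above (like si_dpm_soft_reset() earlier) did nothing but return 0. Dropping them from pp_ip_funcs assumes the dispatching core treats a NULL callback as a successful no-op; a sketch of that convention, reusing the stand-in types from the earlier example:

        static int ip_block_wait_for_idle(struct amdgpu_ip_block *ip_block)
        {
                /* No callback registered: nothing to wait for. */
                if (!ip_block->funcs->wait_for_idle)
                        return 0;

                return ip_block->funcs->wait_for_idle(ip_block);
        }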
(getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 || - (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && - getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) - return -1; - - /*----------------------------------------------------------- - *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL - *----------------------------------------------------------- - */ - fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000); - - switch (dpm_level) { - case 1: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000); - break; - case 2: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000); - break; - case 3: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000); - break; - case 4: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000); - break; - case 5: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000); - break; - case 6: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000); - break; - case 7: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000); - break; - default: - pr_err("DPM Level not supported\n"); - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000); - } - - /*------------------------- - * DECODING FUSE VALUES - * ------------------------ - */ - /*Decode RO_Fused*/ - sRO_fuse = getASICProfilingInfo->sRoFuse; - - sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - /* Finally, the actual fuse value */ - ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1); - fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1); - fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength); - - sCACm_fuse = getASICProfilingInfo->sCACm; - - sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000); - fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000); - - fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength); - - sCACb_fuse = getASICProfilingInfo->sCACb; - - sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength; - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t 
*)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000); - fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000); - - fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength); - - sKt_Beta_fuse = getASICProfilingInfo->sKt_b; - - sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000); - fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000); - - fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused, - fAverage, fRange, sKt_Beta_fuse.ucEfuseLength); - - sKv_m_fuse = getASICProfilingInfo->sKv_m; - - sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - if (result) - return result; - - ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000); - fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000); - fRange = fMultiply(fRange, ConvertToFraction(-1)); - - fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused, - fAverage, fRange, sKv_m_fuse.ucEfuseLength); - - sKv_b_fuse = getASICProfilingInfo->sKv_b; - - sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength; - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000); - fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000); - - fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused, - fAverage, fRange, sKv_b_fuse.ucEfuseLength); - - /* Decoding the Leakage - No special struct container */ - /* - * usLkgEuseIndex=56 - * ucLkgEfuseBitLSB=6 - * ucLkgEfuseLength=10 - * ulLkgEncodeLn_MaxDivMin=69077 - * ulLkgEncodeMax=1000000 - * ulLkgEncodeMin=1000 - * ulEfuseLogisticAlpha=13 - */ - - sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex; - sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB; - sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = 
amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000); - fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000); - - fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM, - fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength); - fLkg_FT = fFT_Lkg_V0NORM; - - /*------------------------------------------- - * PART 2 - Grabbing all required values - *------------------------------------------- - */ - fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign))); - fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign))); - fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign))); - fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign))); - fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign))); - fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign))); - fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign))); - fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign))); - - fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a)); - fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b)); - fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c)); - - fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed)); - - fMargin_FMAX_mean = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000); - fMargin_Plat_mean = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000); - fMargin_FMAX_sigma = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000); - fMargin_Plat_sigma = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000); - - fMargin_DC_sigma = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100); - fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000)); - - fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100)); - fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100)); - fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100)); - fKv_m_fused = fNegate(fDivide(fKv_m_fused, ConvertToFraction(100))); - fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10)); - - fSclk = GetScaledFraction(sclk, 100); - - fV_max = fDivide(GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4)); - fT_prod = 
GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10); - fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100); - fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10); - fV_FT = fDivide(GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4)); - fV_min = fDivide(GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4)); - - /*----------------------- - * PART 3 - *----------------------- - */ - - fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5)); - fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b); - fC_Term = fAdd(fMargin_RO_c, - fAdd(fMultiply(fSM_A0, fLkg_FT), - fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)), - fAdd(fMultiply(fSM_A3, fSclk), - fSubtract(fSM_A7, fRO_fused))))); - - fVDDC_base = fSubtract(fRO_fused, - fSubtract(fMargin_RO_c, - fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk)))); - fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2)); - - repeat = fSubtract(fVDDC_base, - fDivide(fMargin_DC_sigma, ConvertToFraction(1000))); - - fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a, - fGetSquare(repeat)), - fAdd(fMultiply(fMargin_RO_b, repeat), - fMargin_RO_c)); - - fDC_SCLK = fSubtract(fRO_fused, - fSubtract(fRO_DC_margin, - fSubtract(fSM_A3, - fMultiply(fSM_A2, repeat)))); - fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1)); - - fSigma_DC = fSubtract(fSclk, fDC_SCLK); - - fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean); - fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean); - fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma); - fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma); - - fSquared_Sigma_DC = fGetSquare(fSigma_DC); - fSquared_Sigma_CR = fGetSquare(fSigma_CR); - fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX); - - fSclk_margin = fAdd(fMicro_FMAX, - fAdd(fMicro_CR, - fAdd(fMargin_fixed, - fSqrt(fAdd(fSquared_Sigma_FMAX, - fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR)))))); - /* - fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5; - fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6; - fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused; - */ - - fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5); - fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6); - fC_Term = fAdd(fRO_DC_margin, - fAdd(fMultiply(fSM_A0, fLkg_FT), - fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT), - fAdd(fSclk, fSclk_margin)), - fAdd(fMultiply(fSM_A3, - fAdd(fSclk, fSclk_margin)), - fSubtract(fSM_A7, fRO_fused))))); - - SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots); - - if (GreaterThan(fRoots[0], fRoots[1])) - fEVV_V = fRoots[1]; - else - fEVV_V = fRoots[0]; - - if (GreaterThan(fV_min, fEVV_V)) - fEVV_V = fV_min; - else if (GreaterThan(fEVV_V, fV_max)) - fEVV_V = fSubtract(fV_max, fStepSize); - - fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0); - - /*----------------- - * PART 4 - *----------------- - */ - - fV_x = fV_min; - - while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) { - fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd( - fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk), - fGetSquare(fV_x)), fDerateTDP); - - fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor, - fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused, - fT_prod), fKv_b_fused), fV_x)), fV_x))); - fTDP_Power_right = fMultiply(fTDP_Power_right, 
fExponential(fMultiply( - fKt_Beta_fused, fT_prod))); - fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply( - fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT))); - fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply( - fKt_Beta_fused, fT_FT))); - - fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right); - - fTDP_Current = fDivide(fTDP_Power, fV_x); - - fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine), - ConvertToFraction(10))); - - fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0); - - if (GreaterThan(fV_max, fV_NL) && - (GreaterThan(fV_NL, fEVV_V) || - Equal(fV_NL, fEVV_V))) { - fV_NL = fMultiply(fV_NL, ConvertToFraction(1000)); - - *voltage = (uint16_t)fV_NL.partial.real; - break; - } else - fV_x = fAdd(fV_x, fStepSize); - } - - return result; -} - /** * atomctrl_get_voltage_evv_on_sclk: gets voltage via call to ATOM COMMAND table. * @hwmgr: input: pointer to hwManager diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h index 1f987e846628..22b0ac12df97 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h @@ -316,8 +316,6 @@ extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, pp_atomctrl_clock_dividers_kong *dividers); extern int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index, uint16_t end_index, uint32_t *efuse); -extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, - uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug); extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_ai *dividers); extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, uint8_t level); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h deleted file mode 100644 index 409aeec6baa9..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h +++ /dev/null @@ -1,561 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
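For reference while reading the removal above: atomctrl_calculate_voltage_evv_on_sclk() folds the decoded fuse values into quadratic coefficients and has SolveQuadracticEqn() solve A*V^2 + B*V + C = 0, keeping the smaller root and clamping it to the board's voltage window. A plain-float restatement of that final step (illustrative only; the driver does this in Q16.16 fixed point and additionally rounds to the voltage step size):

        #include <math.h>

        static double evv_pick_voltage(double A, double B, double C,
                                       double v_min, double v_max, double step)
        {
                /* Assumes a real solution exists; the fixed-point fSqrt()
                 * simply returns 0 for a negative operand. */
                double d = sqrt(B * B - 4.0 * A * C);
                double lo = (-B - d) / (2.0 * A);
                double hi = (-B + d) / (2.0 * A);
                double v = lo < hi ? lo : hi;   /* the smaller root is kept */

                if (v < v_min)
                        v = v_min;
                else if (v > v_max)
                        v = v_max - step;       /* back off one step from Vmax */

                return v;
        }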
- * - */ -#include <asm/div64.h> - -enum ppevvmath_constants { - /* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */ - SHIFT_AMOUNT = 16, - - /* Change this value to change the number of decimal places in the final output - 5 is a good default */ - PRECISION = 5, - - SHIFTED_2 = (2 << SHIFT_AMOUNT), - - /* 32767 - Might change in the future */ - MAX = (1 << (SHIFT_AMOUNT - 1)) - 1, -}; - -/* ------------------------------------------------------------------------------- - * NEW TYPE - fINT - * ------------------------------------------------------------------------------- - * A variable of type fInt can be accessed in 3 ways using the dot (.) operator - * fInt A; - * A.full => The full number as it is. Generally not easy to read - * A.partial.real => Only the integer portion - * A.partial.decimal => Only the fractional portion - */ -typedef union _fInt { - int full; - struct _partial { - unsigned int decimal: SHIFT_AMOUNT; /*Needs to always be unsigned*/ - int real: 32 - SHIFT_AMOUNT; - } partial; -} fInt; - -/* ------------------------------------------------------------------------------- - * Function Declarations - * ------------------------------------------------------------------------------- - */ -static fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */ -static fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */ -static fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */ -static int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */ - -static fInt fNegate(fInt); /* Returns -1 * input fInt value */ -static fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */ -static fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */ -static fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */ -static fInt fDivide (fInt A, fInt B); /* Returns A/B */ -static fInt fGetSquare(fInt); /* Returns the square of a fInt number */ -static fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */ - -static int uAbs(int); /* Returns the Absolute value of the Int */ -static int uPow(int base, int exponent); /* Returns base^exponent an INT */ - -static void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */ -static bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */ -static bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */ - -static fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */ -static fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */ - -/* Fuse decoding functions - * ------------------------------------------------------------------------------------- - */ -static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength); -static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength); -static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength); - -/* Internal Support Functions - Use these ONLY for testing or adding to internal functions - * ------------------------------------------------------------------------------------- - * Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons. 
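The fInt union declared above is a signed Q16.16 fixed-point number: SHIFT_AMOUNT = 16 fraction bits, with the integer and fractional parts addressable through the bitfields. The core trick, visible in fMultiply() further down, is widening to 64 bits before renormalizing. The same idea in a compact, self-contained form (type and helper names are mine):

        #include <stdint.h>

        #define FSHIFT 16                       /* mirrors SHIFT_AMOUNT */

        typedef int32_t q16_t;                  /* signed Q16.16 value */

        static inline q16_t q16_from_int(int x)
        {
                return (q16_t)(x << FSHIFT);
        }

        static inline int q16_to_int(q16_t a)
        {
                return a >> FSHIFT;             /* truncates the fraction */
        }

        /* Q16.16 * Q16.16 yields a Q32.32 intermediate; widen to 64 bits
         * so it cannot overflow, then drop 16 fraction bits to land back
         * in Q16.16 - exactly what the removed fMultiply() does. */
        static inline q16_t q16_mul(q16_t a, q16_t b)
        {
                return (q16_t)(((int64_t)a * (int64_t)b) >> FSHIFT);
        }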
- */ -static fInt Divide (int, int); /* Divide two INTs and return result as FINT */ -static fInt fNegate(fInt); - -static int uGetScaledDecimal (fInt); /* Internal function */ -static int GetReal (fInt A); /* Internal function */ - -/* ------------------------------------------------------------------------------------- - * TROUBLESHOOTING INFORMATION - * ------------------------------------------------------------------------------------- - * 1) ConvertToFraction - InputOutOfRangeException: Only accepts numbers smaller than MAX (default: 32767) - * 2) fAdd - OutputOutOfRangeException: Output bigger than MAX (default: 32767) - * 3) fMultiply - OutputOutOfRangeException: - * 4) fGetSquare - OutputOutOfRangeException: - * 5) fDivide - DivideByZeroException - * 6) fSqrt - NegativeSquareRootException: Input cannot be a negative number - */ - -/* ------------------------------------------------------------------------------------- - * START OF CODE - * ------------------------------------------------------------------------------------- - */ -static fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ -{ - uint32_t i; - bool bNegated = false; - - fInt fPositiveOne = ConvertToFraction(1); - fInt fZERO = ConvertToFraction(0); - - fInt lower_bound = Divide(78, 10000); - fInt solution = fPositiveOne; /*Starting off with baseline of 1 */ - fInt error_term; - - static const uint32_t k_array[11] = {55452, 27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78}; - static const uint32_t expk_array[11] = {2560000, 160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078}; - - if (GreaterThan(fZERO, exponent)) { - exponent = fNegate(exponent); - bNegated = true; - } - - while (GreaterThan(exponent, lower_bound)) { - for (i = 0; i < 11; i++) { - if (GreaterThan(exponent, GetScaledFraction(k_array[i], 10000))) { - exponent = fSubtract(exponent, GetScaledFraction(k_array[i], 10000)); - solution = fMultiply(solution, GetScaledFraction(expk_array[i], 10000)); - } - } - } - - error_term = fAdd(fPositiveOne, exponent); - - solution = fMultiply(solution, error_term); - - if (bNegated) - solution = fDivide(fPositiveOne, solution); - - return solution; -} - -static fInt fNaturalLog(fInt value) -{ - uint32_t i; - fInt upper_bound = Divide(8, 1000); - fInt fNegativeOne = ConvertToFraction(-1); - fInt solution = ConvertToFraction(0); /*Starting off with baseline of 0 */ - fInt error_term; - - static const uint32_t k_array[10] = {160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078}; - static const uint32_t logk_array[10] = {27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78}; - - while (GreaterThan(fAdd(value, fNegativeOne), upper_bound)) { - for (i = 0; i < 10; i++) { - if (GreaterThan(value, GetScaledFraction(k_array[i], 10000))) { - value = fDivide(value, GetScaledFraction(k_array[i], 10000)); - solution = fAdd(solution, GetScaledFraction(logk_array[i], 10000)); - } - } - } - - error_term = fAdd(fNegativeOne, value); - - return fAdd(solution, error_term); -} - -static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength) -{ - fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); - fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); - - fInt f_decoded_value; - - f_decoded_value = fDivide(f_fuse_value, f_bit_max_value); - f_decoded_value = fMultiply(f_decoded_value, f_range); - f_decoded_value = fAdd(f_decoded_value, f_min); - - return f_decoded_value; -} - - -static fInt 
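fExponential() above needs no transcendental support: it peels off precomputed (k, e^k) pairs - the k_array entries are ln 256, ln 16, ..., ln 1.0078125 scaled by 10000 - and once the residual exponent is tiny it applies the first-order remainder e^x ≈ 1 + x (fNaturalLog() runs the same tables in reverse). The decomposition in floating point, for illustration:

        /* Float sketch of the removed fExponential(). */
        static double exp_by_tables(double x)
        {
                static const double k[] = {
                        5.5452, 2.7726, 1.3863, 0.6931, 0.4055, 0.2231,
                        0.1178, 0.0606, 0.0308, 0.0155, 0.0078,
                };
                static const double ek[] = {    /* e raised to each k */
                        256.0, 16.0, 4.0, 2.0, 1.5, 1.25,
                        1.125, 1.0625, 1.03125, 1.015625, 1.0078125,
                };
                double r = 1.0;
                int neg = x < 0.0;
                unsigned int i;

                if (neg)
                        x = -x;

                while (x > 0.0078) {            /* the header's lower_bound */
                        for (i = 0; i < 11; i++) {
                                if (x > k[i]) {
                                        x -= k[i];
                                        r *= ek[i];
                                }
                        }
                }

                r *= 1.0 + x;                   /* first-order remainder */

                return neg ? 1.0 / r : r;       /* e^-x = 1 / e^x */
        }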
fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength) -{ - fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); - fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); - - fInt f_CONSTANT_NEG13 = ConvertToFraction(-13); - fInt f_CONSTANT1 = ConvertToFraction(1); - - fInt f_decoded_value; - - f_decoded_value = fSubtract(fDivide(f_bit_max_value, f_fuse_value), f_CONSTANT1); - f_decoded_value = fNaturalLog(f_decoded_value); - f_decoded_value = fMultiply(f_decoded_value, fDivide(f_range, f_CONSTANT_NEG13)); - f_decoded_value = fAdd(f_decoded_value, f_average); - - return f_decoded_value; -} - -static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength) -{ - fInt fLeakage; - fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); - - fLeakage = fMultiply(ln_max_div_min, Convert_ULONG_ToFraction(leakageID_fuse)); - fLeakage = fDivide(fLeakage, f_bit_max_value); - fLeakage = fExponential(fLeakage); - fLeakage = fMultiply(fLeakage, f_min); - - return fLeakage; -} - -static fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */ -{ - fInt temp; - - if (X <= MAX) - temp.full = (X << SHIFT_AMOUNT); - else - temp.full = 0; - - return temp; -} - -static fInt fNegate(fInt X) -{ - fInt CONSTANT_NEGONE = ConvertToFraction(-1); - return fMultiply(X, CONSTANT_NEGONE); -} - -static fInt Convert_ULONG_ToFraction(uint32_t X) -{ - fInt temp; - - if (X <= MAX) - temp.full = (X << SHIFT_AMOUNT); - else - temp.full = 0; - - return temp; -} - -static fInt GetScaledFraction(int X, int factor) -{ - int times_shifted, factor_shifted; - bool bNEGATED; - fInt fValue; - - times_shifted = 0; - factor_shifted = 0; - bNEGATED = false; - - if (X < 0) { - X = -1*X; - bNEGATED = true; - } - - if (factor < 0) { - factor = -1*factor; - bNEGATED = !bNEGATED; /*If bNEGATED = true due to X < 0, this will cover the case of negative cancelling negative */ - } - - if ((X > MAX) || factor > MAX) { - if ((X/factor) <= MAX) { - while (X > MAX) { - X = X >> 1; - times_shifted++; - } - - while (factor > MAX) { - factor = factor >> 1; - factor_shifted++; - } - } else { - fValue.full = 0; - return fValue; - } - } - - if (factor == 1) - return ConvertToFraction(X); - - fValue = fDivide(ConvertToFraction(X * uPow(-1, bNEGATED)), ConvertToFraction(factor)); - - fValue.full = fValue.full << times_shifted; - fValue.full = fValue.full >> factor_shifted; - - return fValue; -} - -/* Addition using two fInts */ -static fInt fAdd (fInt X, fInt Y) -{ - fInt Sum; - - Sum.full = X.full + Y.full; - - return Sum; -} - -/* Addition using two fInts */ -static fInt fSubtract (fInt X, fInt Y) -{ - fInt Difference; - - Difference.full = X.full - Y.full; - - return Difference; -} - -static bool Equal(fInt A, fInt B) -{ - if (A.full == B.full) - return true; - else - return false; -} - -static bool GreaterThan(fInt A, fInt B) -{ - if (A.full > B.full) - return true; - else - return false; -} - -static fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ -{ - fInt Product; - int64_t tempProduct; - - /*The following is for a very specific common case: Non-zero number with ONLY fractional portion*/ - /* TEMPORARILY DISABLED - CAN BE USED TO IMPROVE PRECISION - bool X_LessThanOne, Y_LessThanOne; - - X_LessThanOne = (X.partial.real == 0 && X.partial.decimal != 0 && X.full >= 0); - Y_LessThanOne = (Y.partial.real == 0 && Y.partial.decimal != 0 && Y.full >= 0); - - if 
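fDecodeLinearFuse() and fDecodeLogisticFuse() above map a raw n-bit efuse code onto a calibrated physical range: linearly as min + range * code / (2^n - 1), or through an inverted logistic curve (via fNaturalLog()) for skewed parameter distributions. The linear case in plain floats, names mine:

        /* Float restatement of the removed fDecodeLinearFuse(). */
        static double decode_linear_fuse(unsigned int code, double min,
                                         double range, unsigned int bitlength)
        {
                double max_code = (double)((1u << bitlength) - 1);

                return min + range * ((double)code / max_code);
        }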
(X_LessThanOne && Y_LessThanOne) { - Product.full = X.full * Y.full; - return Product - }*/ - - tempProduct = ((int64_t)X.full) * ((int64_t)Y.full); /*Q(16,16)*Q(16,16) = Q(32, 32) - Might become a negative number! */ - tempProduct = tempProduct >> 16; /*Remove lagging 16 bits - Will lose some precision from decimal; */ - Product.full = (int)tempProduct; /*The int64_t will lose the leading 16 bits that were part of the integer portion */ - - return Product; -} - -static fInt fDivide (fInt X, fInt Y) -{ - fInt fZERO, fQuotient; - int64_t longlongX, longlongY; - - fZERO = ConvertToFraction(0); - - if (Equal(Y, fZERO)) - return fZERO; - - longlongX = (int64_t)X.full; - longlongY = (int64_t)Y.full; - - longlongX = longlongX << 16; /*Q(16,16) -> Q(32,32) */ - - div64_s64(longlongX, longlongY); /*Q(32,32) divided by Q(16,16) = Q(16,16) Back to original format */ - - fQuotient.full = (int)longlongX; - return fQuotient; -} - -static int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/ -{ - fInt fullNumber, scaledDecimal, scaledReal; - - scaledReal.full = GetReal(A) * uPow(10, PRECISION-1); /* DOUBLE CHECK THISSSS!!! */ - - scaledDecimal.full = uGetScaledDecimal(A); - - fullNumber = fAdd(scaledDecimal, scaledReal); - - return fullNumber.full; -} - -static fInt fGetSquare(fInt A) -{ - return fMultiply(A, A); -} - -/* x_new = x_old - (x_old^2 - C) / (2 * x_old) */ -static fInt fSqrt(fInt num) -{ - fInt F_divide_Fprime, Fprime; - fInt test; - fInt twoShifted; - int seed, counter, error; - fInt x_new, x_old, C, y; - - fInt fZERO = ConvertToFraction(0); - - /* (0 > num) is the same as (num < 0), i.e., num is negative */ - - if (GreaterThan(fZERO, num) || Equal(fZERO, num)) - return fZERO; - - C = num; - - if (num.partial.real > 3000) - seed = 60; - else if (num.partial.real > 1000) - seed = 30; - else if (num.partial.real > 100) - seed = 10; - else - seed = 2; - - counter = 0; - - if (Equal(num, fZERO)) /*Square Root of Zero is zero */ - return fZERO; - - twoShifted = ConvertToFraction(2); - x_new = ConvertToFraction(seed); - - do { - counter++; - - x_old.full = x_new.full; - - test = fGetSquare(x_old); /*1.75*1.75 is reverting back to 1 when shifted down */ - y = fSubtract(test, C); /*y = f(x) = x^2 - C; */ - - Fprime = fMultiply(twoShifted, x_old); - F_divide_Fprime = fDivide(y, Fprime); - - x_new = fSubtract(x_old, F_divide_Fprime); - - error = ConvertBackToInteger(x_new) - ConvertBackToInteger(x_old); - - if (counter > 20) /*20 is already way too many iterations. 
If we dont have an answer by then, we never will*/ - return x_new; - - } while (uAbs(error) > 0); - - return x_new; -} - -static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) -{ - fInt *pRoots = &Roots[0]; - fInt temp, root_first, root_second; - fInt f_CONSTANT10, f_CONSTANT100; - - f_CONSTANT100 = ConvertToFraction(100); - f_CONSTANT10 = ConvertToFraction(10); - - while (GreaterThan(A, f_CONSTANT100) || GreaterThan(B, f_CONSTANT100) || GreaterThan(C, f_CONSTANT100)) { - A = fDivide(A, f_CONSTANT10); - B = fDivide(B, f_CONSTANT10); - C = fDivide(C, f_CONSTANT10); - } - - temp = fMultiply(ConvertToFraction(4), A); /* root = 4*A */ - temp = fMultiply(temp, C); /* root = 4*A*C */ - temp = fSubtract(fGetSquare(B), temp); /* root = b^2 - 4AC */ - temp = fSqrt(temp); /*root = Sqrt (b^2 - 4AC); */ - - root_first = fSubtract(fNegate(B), temp); /* b - Sqrt(b^2 - 4AC) */ - root_second = fAdd(fNegate(B), temp); /* b + Sqrt(b^2 - 4AC) */ - - root_first = fDivide(root_first, ConvertToFraction(2)); /* [b +- Sqrt(b^2 - 4AC)]/[2] */ - root_first = fDivide(root_first, A); /*[b +- Sqrt(b^2 - 4AC)]/[2*A] */ - - root_second = fDivide(root_second, ConvertToFraction(2)); /* [b +- Sqrt(b^2 - 4AC)]/[2] */ - root_second = fDivide(root_second, A); /*[b +- Sqrt(b^2 - 4AC)]/[2*A] */ - - *(pRoots + 0) = root_first; - *(pRoots + 1) = root_second; -} - -/* ----------------------------------------------------------------------------- - * SUPPORT FUNCTIONS - * ----------------------------------------------------------------------------- - */ - -/* Conversion Functions */ -static int GetReal (fInt A) -{ - return (A.full >> SHIFT_AMOUNT); -} - -static fInt Divide (int X, int Y) -{ - fInt A, B, Quotient; - - A.full = X << SHIFT_AMOUNT; - B.full = Y << SHIFT_AMOUNT; - - Quotient = fDivide(A, B); - - return Quotient; -} - -static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */ -{ - int dec[PRECISION]; - int i, scaledDecimal = 0, tmp = A.partial.decimal; - - for (i = 0; i < PRECISION; i++) { - dec[i] = tmp / (1 << SHIFT_AMOUNT); - tmp = tmp - ((1 << SHIFT_AMOUNT)*dec[i]); - tmp *= 10; - scaledDecimal = scaledDecimal + dec[i]*uPow(10, PRECISION - 1 - i); - } - - return scaledDecimal; -} - -static int uPow(int base, int power) -{ - if (power == 0) - return 1; - else - return (base)*uPow(base, power - 1); -} - -static int uAbs(int X) -{ - if (X < 0) - return (X * -1); - else - return X; -} - -static fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term) -{ - fInt solution; - - solution = fDivide(A, fStepSize); - solution.partial.decimal = 0; /*All fractional digits changes to 0 */ - - if (error_term) - solution.partial.real += 1; /*Error term of 1 added */ - - solution = fMultiply(solution, fStepSize); - solution = fAdd(solution, fStepSize); - - return solution; -} - diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c index 79c817752a33..2b446f8866ba 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c @@ -62,578 +62,6 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) return table_address; } -#if 0 -static void dump_pptable(PPTable_t *pptable) -{ - int i; - - pr_info("Version = 0x%08x\n", pptable->Version); - - pr_info("FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - pr_info("FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - 
pr_info("SocketPowerLimitAc0 = %d\n", pptable->SocketPowerLimitAc0); - pr_info("SocketPowerLimitAc0Tau = %d\n", pptable->SocketPowerLimitAc0Tau); - pr_info("SocketPowerLimitAc1 = %d\n", pptable->SocketPowerLimitAc1); - pr_info("SocketPowerLimitAc1Tau = %d\n", pptable->SocketPowerLimitAc1Tau); - pr_info("SocketPowerLimitAc2 = %d\n", pptable->SocketPowerLimitAc2); - pr_info("SocketPowerLimitAc2Tau = %d\n", pptable->SocketPowerLimitAc2Tau); - pr_info("SocketPowerLimitAc3 = %d\n", pptable->SocketPowerLimitAc3); - pr_info("SocketPowerLimitAc3Tau = %d\n", pptable->SocketPowerLimitAc3Tau); - pr_info("SocketPowerLimitDc = %d\n", pptable->SocketPowerLimitDc); - pr_info("SocketPowerLimitDcTau = %d\n", pptable->SocketPowerLimitDcTau); - pr_info("TdcLimitSoc = %d\n", pptable->TdcLimitSoc); - pr_info("TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau); - pr_info("TdcLimitGfx = %d\n", pptable->TdcLimitGfx); - pr_info("TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau); - - pr_info("TedgeLimit = %d\n", pptable->TedgeLimit); - pr_info("ThotspotLimit = %d\n", pptable->ThotspotLimit); - pr_info("ThbmLimit = %d\n", pptable->ThbmLimit); - pr_info("Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit); - pr_info("Tvr_memLimit = %d\n", pptable->Tvr_memLimit); - pr_info("Tliquid1Limit = %d\n", pptable->Tliquid1Limit); - pr_info("Tliquid2Limit = %d\n", pptable->Tliquid2Limit); - pr_info("TplxLimit = %d\n", pptable->TplxLimit); - pr_info("FitLimit = %d\n", pptable->FitLimit); - - pr_info("PpmPowerLimit = %d\n", pptable->PpmPowerLimit); - pr_info("PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold); - - pr_info("MemoryOnPackage = 0x%02x\n", pptable->MemoryOnPackage); - pr_info("padding8_limits = 0x%02x\n", pptable->padding8_limits); - pr_info("Tvr_SocLimit = %d\n", pptable->Tvr_SocLimit); - - pr_info("UlvVoltageOffsetSoc = %d\n", pptable->UlvVoltageOffsetSoc); - pr_info("UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx); - - pr_info("UlvSmnclkDid = %d\n", pptable->UlvSmnclkDid); - pr_info("UlvMp1clkDid = %d\n", pptable->UlvMp1clkDid); - pr_info("UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass); - pr_info("Padding234 = 0x%02x\n", pptable->Padding234); - - pr_info("MinVoltageGfx = %d\n", pptable->MinVoltageGfx); - pr_info("MinVoltageSoc = %d\n", pptable->MinVoltageSoc); - pr_info("MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx); - pr_info("MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc); - - pr_info("LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx); - pr_info("LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc); - - pr_info("[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c); - - pr_info("[PPCLK_VCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 
0x%08x}\n", - pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK].padding, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c); - - pr_info("[PPCLK_DCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK].padding, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c); - - pr_info("[PPCLK_ECLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_ECLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_ECLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_ECLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_ECLK].padding, - pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.c); - - pr_info("[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].padding, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c); - - pr_info("[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c); - - pr_info("[PPCLK_DCEFCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " 
.ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_DCEFCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCEFCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCEFCLK].padding, - pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.c); - - pr_info("[PPCLK_DISPCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_DISPCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DISPCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DISPCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DISPCLK].padding, - pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.c); - - pr_info("[PPCLK_PIXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_PIXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_PIXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_PIXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_PIXCLK].padding, - pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.c); - - pr_info("[PPCLK_PHYCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_PHYCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_PHYCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_PHYCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_PHYCLK].padding, - pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.c); - - pr_info("[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - 
pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c); - - - pr_info("FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableGfx[i]); - - pr_info("FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableVclk[i]); - - pr_info("FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDclk[i]); - - pr_info("FreqTableEclk\n"); - for (i = 0; i < NUM_ECLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableEclk[i]); - - pr_info("FreqTableSocclk\n"); - for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]); - - pr_info("FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableUclk[i]); - - pr_info("FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableFclk[i]); - - pr_info("FreqTableDcefclk\n"); - for (i = 0; i < NUM_DCEFCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDcefclk[i]); - - pr_info("FreqTableDispclk\n"); - for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDispclk[i]); - - pr_info("FreqTablePixclk\n"); - for (i = 0; i < NUM_PIXCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePixclk[i]); - - pr_info("FreqTablePhyclk\n"); - for (i = 0; i < NUM_PHYCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePhyclk[i]); - - pr_info("DcModeMaxFreq[PPCLK_GFXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]); - pr_info("DcModeMaxFreq[PPCLK_VCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_VCLK]); - pr_info("DcModeMaxFreq[PPCLK_DCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCLK]); - pr_info("DcModeMaxFreq[PPCLK_ECLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_ECLK]); - pr_info("DcModeMaxFreq[PPCLK_SOCCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]); - pr_info("DcModeMaxFreq[PPCLK_UCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_UCLK]); - pr_info("DcModeMaxFreq[PPCLK_DCEFCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCEFCLK]); - pr_info("DcModeMaxFreq[PPCLK_DISPCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DISPCLK]); - pr_info("DcModeMaxFreq[PPCLK_PIXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PIXCLK]); - pr_info("DcModeMaxFreq[PPCLK_PHYCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PHYCLK]); - pr_info("DcModeMaxFreq[PPCLK_FCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_FCLK]); - pr_info("Padding8_Clks = %d\n", pptable->Padding8_Clks); - - pr_info("Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->Mp0clkFreq[i]); - - pr_info("Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]); - - pr_info("GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); - pr_info("GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate); - pr_info("CksEnableFreq = 0x%x\n", pptable->CksEnableFreq); - pr_info("Padding789 = 0x%x\n", pptable->Padding789); - pr_info("CksVoltageOffset[a = 0x%08x b = 0x%08x c = 0x%08x]\n", - pptable->CksVoltageOffset.a, - pptable->CksVoltageOffset.b, - pptable->CksVoltageOffset.c); - pr_info("Padding567[0] = 0x%x\n", pptable->Padding567[0]); - pr_info("Padding567[1] = 0x%x\n", pptable->Padding567[1]); - pr_info("Padding567[2] = 0x%x\n", pptable->Padding567[2]); - pr_info("Padding567[3] = 0x%x\n", pptable->Padding567[3]); - pr_info("GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq); 
- pr_info("GfxclkSource = 0x%x\n", pptable->GfxclkSource); - pr_info("Padding456 = 0x%x\n", pptable->Padding456); - - pr_info("LowestUclkReservedForUlv = %d\n", pptable->LowestUclkReservedForUlv); - pr_info("Padding8_Uclk[0] = 0x%x\n", pptable->Padding8_Uclk[0]); - pr_info("Padding8_Uclk[1] = 0x%x\n", pptable->Padding8_Uclk[1]); - pr_info("Padding8_Uclk[2] = 0x%x\n", pptable->Padding8_Uclk[2]); - - pr_info("PcieGenSpeed\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->PcieGenSpeed[i]); - - pr_info("PcieLaneCount\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->PcieLaneCount[i]); - - pr_info("LclkFreq\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->LclkFreq[i]); - - pr_info("EnableTdpm = %d\n", pptable->EnableTdpm); - pr_info("TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature); - pr_info("TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature); - pr_info("GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit); - - pr_info("FanStopTemp = %d\n", pptable->FanStopTemp); - pr_info("FanStartTemp = %d\n", pptable->FanStartTemp); - - pr_info("FanGainEdge = %d\n", pptable->FanGainEdge); - pr_info("FanGainHotspot = %d\n", pptable->FanGainHotspot); - pr_info("FanGainLiquid = %d\n", pptable->FanGainLiquid); - pr_info("FanGainVrGfx = %d\n", pptable->FanGainVrGfx); - pr_info("FanGainVrSoc = %d\n", pptable->FanGainVrSoc); - pr_info("FanGainPlx = %d\n", pptable->FanGainPlx); - pr_info("FanGainHbm = %d\n", pptable->FanGainHbm); - pr_info("FanPwmMin = %d\n", pptable->FanPwmMin); - pr_info("FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm); - pr_info("FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm); - pr_info("FanMaximumRpm = %d\n", pptable->FanMaximumRpm); - pr_info("FanTargetTemperature = %d\n", pptable->FanTargetTemperature); - pr_info("FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk); - pr_info("FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable); - pr_info("FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev); - - pr_info("FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta); - pr_info("FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta); - pr_info("FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta); - pr_info("FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved); - - pr_info("OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - pr_info("OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - pr_info("Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]); - pr_info("Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]); - - pr_info("qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c); - pr_info("qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c); - pr_info("dBtcGbGfxCksOn{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxCksOn.a, - pptable->dBtcGbGfxCksOn.b, - pptable->dBtcGbGfxCksOn.c); - pr_info("dBtcGbGfxCksOff{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxCksOff.a, - pptable->dBtcGbGfxCksOff.b, - pptable->dBtcGbGfxCksOff.c); - pr_info("dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxAfll.a, - pptable->dBtcGbGfxAfll.b, - pptable->dBtcGbGfxAfll.c); - 
pr_info("dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - pptable->dBtcGbSoc.c); - pr_info("qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - pr_info("qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - pr_info("DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - pr_info("DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - pr_info("DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - pr_info("Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - pr_info("Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - pr_info("DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - pr_info("DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - pr_info("XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]); - pr_info("XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]); - pr_info("XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]); - pr_info("XgmiUclkFreq\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiUclkFreq[i]); - pr_info("XgmiSocclkFreq\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiSocclkFreq[i]); - pr_info("XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]); - - pr_info("DebugOverrides = 0x%x\n", pptable->DebugOverrides); - pr_info("ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - pr_info("ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - pr_info("ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - pr_info("ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - pr_info("MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx); - pr_info("MinVoltageUlvSoc = %d\n", pptable->MinVoltageUlvSoc); - - pr_info("MGpuFanBoostLimitRpm = %d\n", pptable->MGpuFanBoostLimitRpm); - pr_info("padding16_Fan = %d\n", pptable->padding16_Fan); - - pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0); - 
pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0); - - pr_info("DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - for (i = 0; i < 11; i++) - pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]); - - for (i = 0; i < 3; i++) - pr_info("Padding32[%d] = 0x%x\n", i, pptable->Padding32[i]); - - pr_info("MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx); - pr_info("MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc); - - pr_info("VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); - pr_info("VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - pr_info("VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping); - pr_info("VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping); - - pr_info("GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - pr_info("SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask); - pr_info("ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent); - pr_info("Padding8_V = 0x%x\n", pptable->Padding8_V); - - pr_info("GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - pr_info("GfxOffset = 0x%x\n", pptable->GfxOffset); - pr_info("Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - pr_info("SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - pr_info("SocOffset = 0x%x\n", pptable->SocOffset); - pr_info("Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - pr_info("Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent); - pr_info("Mem0Offset = 0x%x\n", pptable->Mem0Offset); - pr_info("Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0); - - pr_info("Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent); - pr_info("Mem1Offset = 0x%x\n", pptable->Mem1Offset); - pr_info("Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1); - - pr_info("AcDcGpio = %d\n", pptable->AcDcGpio); - pr_info("AcDcPolarity = %d\n", pptable->AcDcPolarity); - pr_info("VR0HotGpio = %d\n", pptable->VR0HotGpio); - pr_info("VR0HotPolarity = %d\n", pptable->VR0HotPolarity); - - pr_info("VR1HotGpio = %d\n", pptable->VR1HotGpio); - pr_info("VR1HotPolarity = %d\n", pptable->VR1HotPolarity); - pr_info("Padding1 = 0x%x\n", pptable->Padding1); - pr_info("Padding2 = 0x%x\n", pptable->Padding2); - - pr_info("LedPin0 = %d\n", pptable->LedPin0); - pr_info("LedPin1 = %d\n", pptable->LedPin1); - pr_info("LedPin2 = %d\n", pptable->LedPin2); - pr_info("padding8_4 = 0x%x\n", pptable->padding8_4); - - pr_info("PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled); - pr_info("PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent); - pr_info("PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq); - - pr_info("UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled); - pr_info("UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent); - pr_info("UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq); - - pr_info("FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled); - pr_info("FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent); - pr_info("FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq); - - pr_info("FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled); - pr_info("FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent); - pr_info("FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq); - - for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) { - pr_info("I2cControllers[%d]:\n", i); - pr_info(" .Enabled = %d\n", - pptable->I2cControllers[i].Enabled); - pr_info(" 
.SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - pr_info(" .ControllerPort = %d\n", - pptable->I2cControllers[i].ControllerPort); - pr_info(" .ControllerName = %d\n", - pptable->I2cControllers[i].ControllerName); - pr_info(" .ThermalThrottler = %d\n", - pptable->I2cControllers[i].ThermalThrottler); - pr_info(" .I2cProtocol = %d\n", - pptable->I2cControllers[i].I2cProtocol); - pr_info(" .I2cSpeed = %d\n", - pptable->I2cControllers[i].I2cSpeed); - } - - for (i = 0; i < 10; i++) - pr_info("BoardReserved[%d] = 0x%x\n", i, pptable->BoardReserved[i]); - - for (i = 0; i < 8; i++) - pr_info("MmHubPadding[%d] = 0x%x\n", i, pptable->MmHubPadding[i]); -} -#endif - static int check_powerplay_tables( struct pp_hwmgr *hwmgr, const ATOM_Vega20_POWERPLAYTABLE *powerplay_table) @@ -652,8 +80,6 @@ static int check_powerplay_tables( return -EINVAL; } - //dump_pptable(&powerplay_table->smcPPTable); - return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index 9118fcddbf11..227bf0e84a13 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -60,7 +60,7 @@ struct vi_dpm_level { struct vi_dpm_table { uint32_t count; - struct vi_dpm_level dpm_level[] __counted_by(count); + struct vi_dpm_level dpm_level[]; }; #define PCIE_PERF_REQ_REMOVE_REGISTRY 0 @@ -91,7 +91,7 @@ struct phm_set_power_state_input { struct phm_clock_array { uint32_t count; - uint32_t values[] __counted_by(count); + uint32_t values[]; }; struct phm_clock_voltage_dependency_record { @@ -123,7 +123,7 @@ struct phm_acpclock_voltage_dependency_record { struct phm_clock_voltage_dependency_table { uint32_t count; - struct phm_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_clock_voltage_dependency_record entries[]; }; struct phm_phase_shedding_limits_record { @@ -140,7 +140,7 @@ struct phm_uvd_clock_voltage_dependency_record { struct phm_uvd_clock_voltage_dependency_table { uint8_t count; - struct phm_uvd_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_uvd_clock_voltage_dependency_record entries[]; }; struct phm_acp_clock_voltage_dependency_record { @@ -150,7 +150,7 @@ struct phm_acp_clock_voltage_dependency_record { struct phm_acp_clock_voltage_dependency_table { uint32_t count; - struct phm_acp_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_acp_clock_voltage_dependency_record entries[]; }; struct phm_vce_clock_voltage_dependency_record { @@ -161,32 +161,32 @@ struct phm_vce_clock_voltage_dependency_record { struct phm_phase_shedding_limits_table { uint32_t count; - struct phm_phase_shedding_limits_record entries[] __counted_by(count); + struct phm_phase_shedding_limits_record entries[]; }; struct phm_vceclock_voltage_dependency_table { uint8_t count; - struct phm_vceclock_voltage_dependency_record entries[] __counted_by(count); + struct phm_vceclock_voltage_dependency_record entries[]; }; struct phm_uvdclock_voltage_dependency_table { uint8_t count; - struct phm_uvdclock_voltage_dependency_record entries[] __counted_by(count); + struct phm_uvdclock_voltage_dependency_record entries[]; }; struct phm_samuclock_voltage_dependency_table { uint8_t count; - struct phm_samuclock_voltage_dependency_record entries[] __counted_by(count); + struct phm_samuclock_voltage_dependency_record entries[]; }; struct phm_acpclock_voltage_dependency_table { uint32_t count; - struct phm_acpclock_voltage_dependency_record entries[] __counted_by(count); 
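A note on the annotation these hwmgr.h hunks drop: __counted_by(count) lets CONFIG_FORTIFY_SOURCE and UBSAN treat entries[] as holding exactly count elements, so count must already contain its final value whenever entries[] is indexed; tables that are sized independently of count, or filled before count is assigned, trip those runtime bounds checks, which is the usual reason for reverting the annotation. A minimal standalone sketch of the contract follows; the stub define, struct, and helper names are illustrative.

#include <stdlib.h>

/* In-kernel this comes from <linux/compiler_attributes.h>; stub it so
 * the sketch also builds standalone. */
#ifndef __counted_by
#define __counted_by(member)
#endif

/* Illustrative table with the same shape as the powerplay tables above. */
struct clock_table {
	unsigned int count;
	unsigned int values[] __counted_by(count);
};

static struct clock_table *clock_table_alloc(unsigned int n)
{
	struct clock_table *t = malloc(sizeof(*t) + n * sizeof(t->values[0]));

	if (!t)
		return NULL;
	/*
	 * The contract: count must hold the element count before values[]
	 * is touched. Writing t->values[i] first, or keeping count smaller
	 * than the region actually addressed, is exactly the pattern the
	 * fortified bounds checks flag at runtime.
	 */
	t->count = n;
	return t;
}

int main(void)
{
	struct clock_table *t = clock_table_alloc(4);

	if (t) {
		t->values[0] = 300;	/* in bounds: index < t->count */
		free(t);
	}
	return 0;
}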
+ struct phm_acpclock_voltage_dependency_record entries[]; }; struct phm_vce_clock_voltage_dependency_table { uint8_t count; - struct phm_vce_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_vce_clock_voltage_dependency_record entries[]; }; @@ -393,7 +393,7 @@ union phm_cac_leakage_record { struct phm_cac_leakage_table { uint32_t count; - union phm_cac_leakage_record entries[] __counted_by(count); + union phm_cac_leakage_record entries[]; }; struct phm_samu_clock_voltage_dependency_record { @@ -404,7 +404,7 @@ struct phm_samu_clock_voltage_dependency_record { struct phm_samu_clock_voltage_dependency_table { uint8_t count; - struct phm_samu_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_samu_clock_voltage_dependency_record entries[]; }; struct phm_cac_tdp_table { diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c index b52ce135d84d..d3ff6a831ed5 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c @@ -257,20 +257,18 @@ static int vega12_smu_init(struct pp_hwmgr *hwmgr) priv->smu_tables.entry[TABLE_WATERMARKS].size = sizeof(Watermarks_t); tools_size = 0x19000; - if (tools_size) { - ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, - tools_size, - PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle, - &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr, - &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table); - if (ret) - goto err1; + ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, + tools_size, + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle, + &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr, + &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table); + if (ret) + goto err1; - priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01; - priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size; - } + priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01; + priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size; /* allocate space for AVFS Fuse table */ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index bb3bc68dfc39..5eae14fe79f1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -72,6 +72,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit); static int smu_set_fan_speed_rpm(void *handle, uint32_t speed); static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state); +static void smu_power_profile_mode_get(struct smu_context *smu, + enum PP_SMC_POWER_PROFILE profile_mode); +static void smu_power_profile_mode_put(struct smu_context *smu, + enum PP_SMC_POWER_PROFILE profile_mode); static int smu_sys_get_pp_feature_mask(void *handle, char *buf) @@ -140,7 +144,8 @@ int smu_set_soft_freq_range(struct smu_context *smu, ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, clk_type, min, - max); + max, + false); return ret; } @@ -251,7 +256,7 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu, if (atomic_read(&power_gate->vcn_gated) ^ enable) return 0; - ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable); + ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, 0xff); if (!ret) 
atomic_set(&power_gate->vcn_gated, !enable); @@ -549,7 +554,8 @@ bool is_support_sw_smu(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA20) return false; - if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) && + amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC)) return true; return false; @@ -741,9 +747,9 @@ static int smu_set_funcs(struct amdgpu_device *adev) return 0; } -static int smu_early_init(void *handle) +static int smu_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu; int r; @@ -825,9 +831,9 @@ static int smu_apply_default_config_table_settings(struct smu_context *smu) return smu_set_config_table(smu, &adev->pm.config_table); } -static int smu_late_init(void *handle) +static int smu_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret = 0; @@ -1234,9 +1240,17 @@ static void smu_init_xgmi_plpd_mode(struct smu_context *smu) } } -static int smu_sw_init(void *handle) +static bool smu_is_workload_profile_available(struct smu_context *smu, + u32 profile) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (profile >= PP_SMC_POWER_PROFILE_COUNT) + return false; + return smu->workload_map && smu->workload_map[profile].valid_mapping; +} + +static int smu_sw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret; @@ -1249,30 +1263,19 @@ static int smu_sw_init(void *handle) INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); atomic64_set(&smu->throttle_int_counter, 0); smu->watermarks_bitmap = 0; - smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; - smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); - smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; - smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; - smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; - smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; - smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; - smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; - smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; - smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; - - smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; - smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; - smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; - smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO; - smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; - smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; - smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM; + if (smu->is_apu || + !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) + smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + else + smu->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; + smu_power_profile_mode_get(smu, smu->power_profile_mode); + smu->display_config = 
&adev->pm.pm_display_cfg; smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; @@ -1313,9 +1316,9 @@ static int smu_sw_init(void *handle) return 0; } -static int smu_sw_fini(void *handle) +static int smu_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret; @@ -1325,6 +1328,11 @@ static int smu_sw_fini(void *handle) return ret; } + if (smu->custom_profile_params) { + kfree(smu->custom_profile_params); + smu->custom_profile_params = NULL; + } + smu_fini_microcode(smu); return 0; @@ -1682,7 +1690,9 @@ static int smu_smc_hw_setup(struct smu_context *smu) return ret; } - if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) + pcie_gen = 4; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) pcie_gen = 3; else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) pcie_gen = 2; @@ -1695,7 +1705,9 @@ static int smu_smc_hw_setup(struct smu_context *smu) * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */ - if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32) + pcie_width = 7; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) pcie_width = 6; else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) pcie_width = 5; @@ -1786,10 +1798,10 @@ static int smu_start_smc_engine(struct smu_context *smu) return ret; } -static int smu_hw_init(void *handle) +static int smu_hw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { @@ -2008,9 +2020,9 @@ static int smu_reset_mp1_state(struct smu_context *smu) return ret; } -static int smu_hw_fini(void *handle) +static int smu_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret; @@ -2041,9 +2053,9 @@ static int smu_hw_fini(void *handle) return 0; } -static void smu_late_fini(void *handle) +static void smu_late_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; kfree(smu); @@ -2052,26 +2064,31 @@ static void smu_late_fini(void *handle) static int smu_reset(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; + struct amdgpu_ip_block *ip_block; int ret; - ret = smu_hw_fini(adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC); + if (!ip_block) + return -EINVAL; + + ret = smu_hw_fini(ip_block); if (ret) return ret; - ret = smu_hw_init(adev); + ret = smu_hw_init(ip_block); if (ret) return ret; - ret = smu_late_init(adev); + ret = smu_late_init(ip_block); if (ret) return ret; return 0; } -static int smu_suspend(void *handle) +static int smu_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret; uint64_t count; @@ -2100,13 +2117,16 @@ 
static int smu_suspend(void *handle) if (!ret) adev->gfx.gfx_off_entrycount = count; + /* clear this on suspend so it will get reprogrammed on resume */ + smu->workload_mask = 0; + return 0; } -static int smu_resume(void *handle) +static int smu_resume(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) @@ -2212,25 +2232,49 @@ static int smu_enable_umd_pstate(void *handle, } static int smu_bump_power_profile_mode(struct smu_context *smu, - long *param, - uint32_t param_size) + long *custom_params, + u32 custom_params_max_idx) { - int ret = 0; + u32 workload_mask = 0; + int i, ret = 0; + + for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { + if (smu->workload_refcount[i]) + workload_mask |= 1 << i; + } + + if (smu->workload_mask == workload_mask) + return 0; if (smu->ppt_funcs->set_power_profile_mode) - ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); + ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask, + custom_params, + custom_params_max_idx); + + if (!ret) + smu->workload_mask = workload_mask; return ret; } +static void smu_power_profile_mode_get(struct smu_context *smu, + enum PP_SMC_POWER_PROFILE profile_mode) +{ + smu->workload_refcount[profile_mode]++; +} + +static void smu_power_profile_mode_put(struct smu_context *smu, + enum PP_SMC_POWER_PROFILE profile_mode) +{ + if (smu->workload_refcount[profile_mode]) + smu->workload_refcount[profile_mode]--; +} + static int smu_adjust_power_state_dynamic(struct smu_context *smu, enum amd_dpm_forced_level level, - bool skip_display_settings, - bool force_update) + bool skip_display_settings) { int ret = 0; - int index = 0; - long workload[1]; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); if (!skip_display_settings) { @@ -2255,7 +2299,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, } } - if (force_update || smu_dpm_ctx->dpm_level != level) { + if (smu_dpm_ctx->dpm_level != level) { ret = smu_asic_set_performance_level(smu, level); if (ret) { dev_err(smu->adev->dev, "Failed to set performance level!"); @@ -2267,14 +2311,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, } if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && - smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { - index = fls(smu->workload_mask); - index = index > 0 && index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; - workload[0] = smu->workload_setting[index]; - - if (force_update || smu->power_profile_mode != workload[0]) - smu_bump_power_profile_mode(smu, workload, 0); - } + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) + smu_bump_power_profile_mode(smu, NULL, 0); return ret; } @@ -2293,13 +2331,13 @@ static int smu_handle_task(struct smu_context *smu, ret = smu_pre_display_config_changed(smu); if (ret) return ret; - ret = smu_adjust_power_state_dynamic(smu, level, false, false); + ret = smu_adjust_power_state_dynamic(smu, level, false); break; case AMD_PP_TASK_COMPLETE_INIT: - ret = smu_adjust_power_state_dynamic(smu, level, true, true); + ret = smu_adjust_power_state_dynamic(smu, level, true); break; case AMD_PP_TASK_READJUST_POWER_STATE: - ret = smu_adjust_power_state_dynamic(smu, level, true, false); + ret = smu_adjust_power_state_dynamic(smu, level, true); break; default: break; @@ -2321,12 +2359,11 @@ static int smu_handle_dpm_task(void *handle, static int smu_switch_power_profile(void *handle, enum PP_SMC_POWER_PROFILE type, - bool en) + bool enable) { struct smu_context *smu = handle; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - long workload[1]; - uint32_t index; + int ret; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; @@ -2334,21 +2371,21 @@ static int smu_switch_power_profile(void *handle, if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) return -EINVAL; - if (!en) { - smu->workload_mask &= ~(1 << smu->workload_prority[type]); - index = fls(smu->workload_mask); - index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload[0] = smu->workload_setting[index]; - } else { - smu->workload_mask |= (1 << smu->workload_prority[type]); - index = fls(smu->workload_mask); - index = index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; - workload[0] = smu->workload_setting[index]; - } - if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && - smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) - smu_bump_power_profile_mode(smu, workload, 0); + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { + if (enable) + smu_power_profile_mode_get(smu, type); + else + smu_power_profile_mode_put(smu, type); + ret = smu_bump_power_profile_mode(smu, NULL, 0); + if (ret) { + if (enable) + smu_power_profile_mode_put(smu, type); + else + smu_power_profile_mode_get(smu, type); + return ret; + } + } return 0; } @@ -2865,6 +2902,10 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break; case OD_FAN_MINIMUM_PWM: clk_type = SMU_OD_FAN_MINIMUM_PWM; break; + case OD_FAN_ZERO_RPM_ENABLE: + clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break; + case OD_FAN_ZERO_RPM_STOP_TEMP: + clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break; default: clk_type = SMU_CLK_COUNT; break; } @@ -3036,12 +3077,35 @@ static int smu_set_power_profile_mode(void *handle, uint32_t param_size) { struct smu_context *smu = handle; + bool custom = false; + int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; - return smu_bump_power_profile_mode(smu, param, param_size); + if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) { + custom = true; + /* clear frontend mask so custom changes propagate */ + smu->workload_mask = 0; + } + + if ((param[param_size] != smu->power_profile_mode) || custom) { + /* clear the old user preference */ + smu_power_profile_mode_put(smu, smu->power_profile_mode); + /* set the new user preference */ + smu_power_profile_mode_get(smu, param[param_size]); + ret = smu_bump_power_profile_mode(smu, + custom ? param : NULL, + custom ? param_size : 0); + if (ret) + smu_power_profile_mode_put(smu, param[param_size]); + else + /* store the user's preference */ + smu->power_profile_mode = param[param_size]; + } + + return ret; } static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index b44a185d07e8..3925815358ce 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -556,11 +556,13 @@ struct smu_context { uint32_t hard_min_uclk_req_from_dal; bool disable_uclk_switch; + /* asic agnostic workload mask */ uint32_t workload_mask; - uint32_t workload_prority[WORKLOAD_POLICY_MAX]; - uint32_t workload_setting[WORKLOAD_POLICY_MAX]; + /* default/user workload preference */ uint32_t power_profile_mode; - uint32_t default_power_profile_mode; + uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT]; + /* backend specific custom workload settings */ + long *custom_profile_params; bool pm_enabled; bool is_apu; @@ -731,15 +733,18 @@ struct pptable_funcs { * @set_power_profile_mode: Set a power profile mode. Also used to * create/set custom power profile modes. * &input: Power profile mode parameters. - &size: Size of &input.
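The smu_context hunk just above replaces the old priority/setting arrays with a per-profile refcount plus a single ASIC-agnostic workload_mask, and the smu_power_profile_mode_get()/put() helpers earlier in this patch pair each increment with a decrement around every firmware update (rolling back on failure in smu_switch_power_profile()). A standalone toy model of that flow follows; the profile IDs are stand-ins for the real enum and the printf stands in for the SetWorkloadMask message.

#include <stdio.h>

enum {
	PROFILE_BOOTUP_DEFAULT,
	PROFILE_FULLSCREEN3D,
	PROFILE_VIDEO,
	PROFILE_COMPUTE,
	PROFILE_COUNT
};

static unsigned int workload_refcount[PROFILE_COUNT];
static unsigned int workload_mask;	/* last mask handed to the firmware */

static void profile_get(int profile)
{
	workload_refcount[profile]++;
}

static void profile_put(int profile)
{
	if (workload_refcount[profile])
		workload_refcount[profile]--;
}

/* Mirrors smu_bump_power_profile_mode(): rebuild the mask from the
 * refcounts and only bother the firmware when it actually changed. */
static void profile_bump(void)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < PROFILE_COUNT; i++)
		if (workload_refcount[i])
			mask |= 1u << i;

	if (mask == workload_mask)
		return;

	printf("SetWorkloadMask(0x%x)\n", mask);	/* SMU message stand-in */
	workload_mask = mask;
}

int main(void)
{
	profile_get(PROFILE_FULLSCREEN3D);	/* driver default on dGPUs */
	profile_bump();				/* -> 0x2 */

	profile_get(PROFILE_COMPUTE);		/* a compute client appears */
	profile_bump();				/* -> 0xa */

	profile_put(PROFILE_COMPUTE);		/* client exits */
	profile_bump();				/* -> back to 0x2 */
	return 0;
}

Because profile_bump() early-outs when the rebuilt mask matches the last one sent, get/put churn from multiple clients never reaches the firmware needlessly, and clearing workload_mask on suspend (as the smu_suspend() hunk above does) forces one full reprogram on resume.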
+ * &workload_mask: mask of workloads to enable + * &custom_params: custom profile parameters + * &custom_params_max_idx: max valid idx into custom_params */ - int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size); + int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask, + long *custom_params, u32 custom_params_max_idx); /** * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power * management. */ - int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable); + int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable, int inst); /** * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power @@ -859,11 +864,6 @@ struct pptable_funcs { int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch); /** - * @dump_pptable: Print the power play table to the system log. - */ - void (*dump_pptable)(struct smu_context *smu); - - /** * @get_power_limit: Get the device's power limits. */ int (*get_power_limit)(struct smu_context *smu, @@ -1260,7 +1260,8 @@ struct pptable_funcs { * @set_soft_freq_limited_range: Set the soft frequency range of a clock * domain in MHz. */ - int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); + int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max, + bool automatic); /** * @set_power_source: Notify the SMU of the current power source. diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h index ee457a6f0813..c2fd0a4a13e5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h @@ -25,7 +25,7 @@ #define SMU14_DRIVER_IF_V14_0_H //Increment this version if SkuTable_t or BoardTable_t change -#define PPTABLE_VERSION 0x18 +#define PPTABLE_VERSION 0x1B #define NUM_GFXCLK_DPM_LEVELS 16 #define NUM_SOCCLK_DPM_LEVELS 8 @@ -145,7 +145,7 @@ typedef enum { } FEATURE_BTC_e; // Debug Overrides Bitmask -#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001 +#define DEBUG_OVERRIDE_NOT_USE 0x00000001 #define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002 #define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004 #define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008 @@ -161,6 +161,7 @@ typedef enum { #define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000 #define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000 #define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000 +#define DEBUG_OVERRIDE_DFLL_BTC_FCW_LOG 0x00010000 // VR Mapping Bit Defines #define VR_MAPPING_VR_SELECT_MASK 0x01 @@ -391,6 +392,21 @@ typedef struct { EccInfo_t EccInfo[24]; } EccInfoTable_t; +#define EPCS_HIGH_POWER 600 +#define EPCS_NORMAL_POWER 450 +#define EPCS_LOW_POWER 300 +#define EPCS_SHORTED_POWER 150 +#define EPCS_NO_BOOTUP 0 + +typedef enum{ + EPCS_SHORTED_LIMIT, + EPCS_LOW_POWER_LIMIT, + EPCS_NORMAL_POWER_LIMIT, + EPCS_HIGH_POWER_LIMIT, + EPCS_NOT_CONFIGURED, + EPCS_STATUS_COUNT, +} EPCS_STATUS_e; + //D3HOT sequences typedef enum { BACO_SEQUENCE, @@ -662,7 +678,7 @@ typedef enum { } PP_GRTAVFS_FW_SEP_FUSE_e; #define PP_NUM_RTAVFS_PWL_ZONES 5 - +#define PP_NUM_PSM_DIDT_PWL_ZONES 3 // VBIOS or PPLIB configures telemetry slope and offset. 
Only slope expected to be set for SVI3 // Slope Q1.7, Offset Q1.2 @@ -746,10 +762,10 @@ typedef struct { uint16_t Padding; //Frequency changes - int16_t GfxclkFmin; // MHz - int16_t GfxclkFmax; // MHz - uint16_t UclkFmin; // MHz - uint16_t UclkFmax; // MHz + int16_t GfxclkFoffset; + uint16_t Padding1; + uint16_t UclkFmin; + uint16_t UclkFmax; uint16_t FclkFmin; uint16_t FclkFmax; @@ -770,19 +786,23 @@ typedef struct { uint8_t MaxOpTemp; uint8_t AdvancedOdModeEnabled; - uint8_t Padding1[3]; + uint8_t Padding2[3]; uint16_t GfxVoltageFullCtrlMode; uint16_t SocVoltageFullCtrlMode; uint16_t GfxclkFullCtrlMode; uint16_t UclkFullCtrlMode; uint16_t FclkFullCtrlMode; - uint16_t Padding2; + uint16_t Padding3; int16_t GfxEdc; int16_t GfxPccLimitControl; - uint32_t Spare[10]; + uint16_t GfxclkFmaxVmax; + uint8_t GfxclkFmaxVmaxTemperature; + uint8_t Padding4[1]; + + uint32_t Spare[9]; uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround } OverDriveTable_t; @@ -802,8 +822,8 @@ typedef struct { uint16_t VddSocVmax; //gfxclk - int16_t GfxclkFmin; // MHz - int16_t GfxclkFmax; // MHz + int16_t GfxclkFoffset; + uint16_t Padding; //uclk uint16_t UclkFmin; // MHz uint16_t UclkFmax; // MHz @@ -828,7 +848,7 @@ typedef struct { uint8_t FanZeroRpmEnable; //temperature uint8_t MaxOpTemp; - uint8_t Padding[2]; + uint8_t Padding1[2]; //Full Ctrl uint16_t GfxVoltageFullCtrlMode; @@ -839,7 +859,7 @@ typedef struct { //EDC int16_t GfxEdc; int16_t GfxPccLimitControl; - int16_t Padding1; + int16_t Padding2; uint32_t Spare[5]; } OverDriveLimits_t; @@ -987,8 +1007,9 @@ typedef struct { uint16_t BaseClockDc; uint16_t GameClockDc; uint16_t BoostClockDc; - - uint32_t Reserved[4]; + uint16_t MaxReportedClock; + uint16_t Padding; + uint32_t Reserved[3]; } DriverReportedClocks_t; typedef struct { @@ -1132,7 +1153,7 @@ typedef struct { uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz uint16_t GfxclkAibFmax; - uint16_t GfxclkFreqCap; + uint16_t GfxDpmPadding; //GFX Idle Power Settings uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in Mhz @@ -1172,8 +1193,7 @@ typedef struct { uint32_t DvoFmaxLowScaler; //Unitless float // GFX DCS - uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase - uint16_t PaddingDcs; + uint32_t PaddingDcs; uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch. @@ -1205,8 +1225,7 @@ typedef struct { uint16_t DalDcModeMaxUclkFreq; uint8_t PaddingsMem[2]; //FCLK Section - uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state. 
Set to 0 use FW calculated value - uint16_t PaddingFclk; + uint32_t PaddingFclk; // Link DPM Settings uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5 @@ -1215,12 +1234,19 @@ typedef struct { // SECTION: VDD_GFX AVFS uint8_t OverrideGfxAvfsFuses; - uint8_t GfxAvfsPadding[3]; + uint8_t GfxAvfsPadding[1]; + uint16_t DroopGBStDev; uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding //uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; - uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; + + uint16_t PsmDidt_Vcross[PP_NUM_PSM_DIDT_PWL_ZONES-1]; + uint32_t PsmDidt_StaticDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES]; + uint32_t PsmDidt_StaticDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES]; + uint32_t PsmDidt_DynDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES]; + uint32_t PsmDidt_DynDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES]; + uint32_t spare_HwRtAvfsFuses[19]; uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT]; uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT]; @@ -1246,11 +1272,7 @@ typedef struct { uint32_t dGbV_dT_vmin; uint32_t dGbV_dT_vmax; - //Unused: PMFW-9370 - uint32_t V2F_vmin_range_low; - uint32_t V2F_vmin_range_high; - uint32_t V2F_vmax_range_low; - uint32_t V2F_vmax_range_high; + uint32_t PaddingV2F[4]; AvfsDcBtcParams_t DcBtcGfxParams; QuadraticInt_t SSCurve_GFX; @@ -1327,18 +1349,18 @@ typedef struct { uint16_t PsmDidtReleaseTimer; uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog // CAC EDC - uint32_t Leakage_C0; // in IEEE float - uint32_t Leakage_C1; // in IEEE float - uint32_t Leakage_C2; // in IEEE float - uint32_t Leakage_C3; // in IEEE float - uint32_t Leakage_C4; // in IEEE float - uint32_t Leakage_C5; // in IEEE float - uint32_t GFX_CLK_SCALAR; // in IEEE float - uint32_t GFX_CLK_INTERCEPT; // in IEEE float - uint32_t GFX_CAC_M; // in IEEE float - uint32_t GFX_CAC_B; // in IEEE float - uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float - uint32_t DynToTotalCacScalar; // in IEEE + uint32_t CacEdcCacLeakageC0; + uint32_t CacEdcCacLeakageC1; + uint32_t CacEdcCacLeakageC2; + uint32_t CacEdcCacLeakageC3; + uint32_t CacEdcCacLeakageC4; + uint32_t CacEdcCacLeakageC5; + uint32_t CacEdcGfxClkScalar; + uint32_t CacEdcGfxClkIntercept; + uint32_t CacEdcCac_m; + uint32_t CacEdcCac_b; + uint32_t CacEdcCurrLimitGuardband; + uint32_t CacEdcDynToTotalCacRatio; // GFX EDC XVMIN uint32_t XVmin_Gfx_EdcThreshScalar; uint32_t XVmin_Gfx_EdcEnableFreq; @@ -1467,7 +1489,7 @@ typedef struct { uint8_t VddqOffEnabled; uint8_t PaddingUmcFlags[2]; - uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued + uint32_t Paddign1; uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS uint8_t FuseWritePowerMuxPresent; @@ -1530,7 +1552,7 @@ typedef struct { int16_t FuzzyFan_ErrorSetDelta; int16_t FuzzyFan_ErrorRateSetDelta; int16_t FuzzyFan_PwmSetDelta; - uint16_t FuzzyFan_Reserved; + uint16_t FanPadding2; uint16_t FwCtfLimit[TEMP_COUNT]; @@ -1547,9 +1569,10 @@ typedef struct { uint16_t FanSpare[1]; uint8_t FanIntakeSensorSupport; uint8_t FanIntakePadding; - uint32_t FanAmbientPerfBoostThreshold; uint32_t FanSpare2[12]; + uint32_t ODFeatureCtrlMask; + uint16_t TemperatureLimit_Hynix; // In degrees Celsius. 
Memory temperature limit associated with Hynix uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron uint16_t TemperatureFwCtfLimit_Hynix; @@ -1637,7 +1660,7 @@ typedef struct { uint16_t AverageDclk0Frequency ; uint16_t AverageVclk1Frequency ; uint16_t AverageDclk1Frequency ; - uint16_t PCIeBusy ; + uint16_t AveragePCIeBusy ; uint16_t dGPU_W_MAX ; uint16_t padding ; @@ -1665,12 +1688,12 @@ typedef struct { uint16_t AverageGfxActivity ; uint16_t AverageUclkActivity ; - uint16_t Vcn0ActivityPercentage ; + uint16_t AverageVcn0ActivityPercentage; uint16_t Vcn1ActivityPercentage ; uint32_t EnergyAccumulator; uint16_t AverageSocketPower; - uint16_t MovingAverageTotalBoardPower; + uint16_t AverageTotalBoardPower; uint16_t AvgTemperature[TEMP_COUNT]; uint16_t AvgTemperatureFanIntake; @@ -1684,7 +1707,8 @@ typedef struct { uint8_t ThrottlingPercentage[THROTTLER_COUNT]; - uint8_t padding1[3]; + uint8_t VmaxThrottlingPercentage; + uint8_t padding1[2]; //metrics for D3hot entry/exit and driver ARM msgs uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT]; @@ -1693,7 +1717,7 @@ typedef struct { uint16_t ApuSTAPMSmartShiftLimit; uint16_t ApuSTAPMLimit; - uint16_t MovingAvgApuSocketPower; + uint16_t AvgApuSocketPower; uint16_t AverageUclkActivity_MAX; @@ -1823,6 +1847,17 @@ typedef struct { #define TABLE_TRANSFER_FAILED 0xFF #define TABLE_TRANSFER_PENDING 0xAB +#define TABLE_PPT_FAILED 0x100 +#define TABLE_TDC_FAILED 0x200 +#define TABLE_TEMP_FAILED 0x400 +#define TABLE_FAN_TARGET_TEMP_FAILED 0x800 +#define TABLE_FAN_STOP_TEMP_FAILED 0x1000 +#define TABLE_FAN_START_TEMP_FAILED 0x2000 +#define TABLE_FAN_PWM_MIN_FAILED 0x4000 +#define TABLE_ACOUSTIC_TARGET_RPM_FAILED 0x8000 +#define TABLE_ACOUSTIC_LIMIT_RPM_FAILED 0x10000 +#define TABLE_MGPU_ACOUSTIC_TARGET_RPM_FAILED 0x20000 + // Table types #define TABLE_PPTABLE 0 #define TABLE_COMBO_PPTABLE 1 @@ -1849,5 +1884,6 @@ typedef struct { #define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7 #define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 #define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 +#define IH_INTERRUPT_CONTEXT_ID_DYNAMIC_TABLE 0xA #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index 822c6425d90e..0f96b8c59a0e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -123,7 +123,7 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0xD +#define SMU_METRICS_TABLE_VERSION 0xE typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; @@ -231,6 +231,9 @@ typedef struct __attribute__((packed, aligned(4))) { // PER XCD ACTIVITY uint32_t GfxBusy[8]; uint64_t GfxBusyAcc[8]; + + //PCIE BW Data and error count + uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated } MetricsTableX_t; typedef struct __attribute__((packed, aligned(4))) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index e71a721c12b9..a299dc4a8071 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -313,6 +313,8 @@ enum smu_clk_type { SMU_OD_ACOUSTIC_TARGET, SMU_OD_FAN_TARGET_TEMPERATURE, SMU_OD_FAN_MINIMUM_PWM, + SMU_OD_FAN_ZERO_RPM_ENABLE, + SMU_OD_FAN_ZERO_RPM_STOP_TEMP, SMU_CLK_COUNT, }; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h 
b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h index c2ab336bb530..ed8304d82831 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h @@ -255,7 +255,7 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t *min, uint32_t *max); int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h index 1ad2dff71090..0886d8cffbd0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h @@ -56,7 +56,7 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu); int smu_v12_0_mode2_reset(struct smu_context *smu); int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int smu_v12_0_set_driver_table_location(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index e58220a7ee2f..ae3563d71fa0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -219,7 +219,7 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t *min, uint32_t *max); int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -255,7 +255,8 @@ int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, uint64_t event_arg); int smu_v13_0_set_vcn_enable(struct smu_context *smu, - bool enable); + bool enable, + int inst); int smu_v13_0_set_jpeg_enable(struct smu_context *smu, bool enable); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 46b456590a08..29a4583db873 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -28,7 +28,7 @@ #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6 -#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x26 +#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E #define FEATURE_MASK(feature) (1ULL << feature) @@ -53,7 +53,7 @@ #define CTF_OFFSET_MEM 5 extern const int decoded_link_speed[5]; -extern const int decoded_link_width[7]; +extern const int decoded_link_width[8]; #define DECODE_GEN_SPEED(gen_speed_idx) (decoded_link_speed[gen_speed_idx]) #define DECODE_LANE_WIDTH(lane_width_idx) (decoded_link_width[lane_width_idx]) @@ -186,7 +186,7 @@ int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t *min, uint32_t *max); int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -210,7 +210,8 @@ int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, uint64_t event_arg); int 
smu_v14_0_set_vcn_enable(struct smu_context *smu, - bool enable); + bool enable, + int inst); int smu_v14_0_set_jpeg_enable(struct smu_context *smu, bool enable); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index c0f6b59369b7..8aa61a9f7778 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1344,8 +1344,12 @@ static int arcturus_get_power_limit(struct smu_context *smu, *default_power_limit = power_limit; if (max_power_limit) *max_power_limit = power_limit; + /** + * No lower bound is imposed on the limit. Any unreasonable limit set + * will result in frequent throttling. + */ if (min_power_limit) - *min_power_limit = power_limit; + *min_power_limit = 0; return 0; } @@ -1441,98 +1445,120 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, return size; } -static int arcturus_set_power_profile_mode(struct smu_context *smu, - long *input, - uint32_t size) +#define ARCTURUS_CUSTOM_PARAMS_COUNT 10 +#define ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT 2 +#define ARCTURUS_CUSTOM_PARAMS_SIZE (ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT * ARCTURUS_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int arcturus_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffInt_t activity_monitor; - int workload_type = 0; - uint32_t profile_mode = input[size]; - int ret = 0; + int ret, idx; - if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); - return -EINVAL; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; } + idx = 0 * ARCTURUS_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor.Gfx_FPS = input[idx + 1]; + activity_monitor.Gfx_UseRlcBusy = input[idx + 2]; + activity_monitor.Gfx_MinActiveFreqType = input[idx + 3]; + activity_monitor.Gfx_MinActiveFreq = input[idx + 4]; + activity_monitor.Gfx_BoosterFreqType = input[idx + 5]; + activity_monitor.Gfx_BoosterFreq = input[idx + 6]; + activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 1 * ARCTURUS_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Uclk */ + activity_monitor.Mem_FPS = input[idx + 1]; + activity_monitor.Mem_UseRlcBusy = input[idx + 2]; + activity_monitor.Mem_MinActiveFreqType = input[idx + 3]; + activity_monitor.Mem_MinActiveFreq = input[idx + 4]; + activity_monitor.Mem_BoosterFreqType = input[idx + 5]; + activity_monitor.Mem_BoosterFreq = input[idx + 6]; + activity_monitor.Mem_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9]; + } - if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && - (smu->smc_fw_version >= 0x360d00)) { - if (size != 10) - return -EINVAL; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor), - false); - if 
(ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } + return ret; +} - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor.Gfx_FPS = input[1]; - activity_monitor.Gfx_UseRlcBusy = input[2]; - activity_monitor.Gfx_MinActiveFreqType = input[3]; - activity_monitor.Gfx_MinActiveFreq = input[4]; - activity_monitor.Gfx_BoosterFreqType = input[5]; - activity_monitor.Gfx_BoosterFreq = input[6]; - activity_monitor.Gfx_PD_Data_limit_c = input[7]; - activity_monitor.Gfx_PD_Data_error_coeff = input[8]; - activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; - break; - case 1: /* Uclk */ - activity_monitor.Mem_FPS = input[1]; - activity_monitor.Mem_UseRlcBusy = input[2]; - activity_monitor.Mem_MinActiveFreqType = input[3]; - activity_monitor.Mem_MinActiveFreq = input[4]; - activity_monitor.Mem_BoosterFreqType = input[5]; - activity_monitor.Mem_BoosterFreq = input[6]; - activity_monitor.Mem_PD_Data_limit_c = input[7]; - activity_monitor.Mem_PD_Data_error_coeff = input[8]; - activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; - break; - default: +static int arcturus_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; + + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); + + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (smu->smc_fw_version < 0x360d00) return -EINVAL; + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(ARCTURUS_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; } - - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor), - true); + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != ARCTURUS_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * ARCTURUS_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = arcturus_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } - } - - /* - * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT - * Not all profile modes are supported on arcturus. 
- */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - profile_mode); - if (workload_type < 0) { - dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode); - return -EINVAL; + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, ARCTURUS_CUSTOM_PARAMS_SIZE); } ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - 1 << workload_type, - NULL); + SMU_MSG_SetWorkloadMask, + backend_workload_mask, + NULL); if (ret) { - dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type); + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } - smu->power_profile_mode = profile_mode; - - return 0; + return ret; } static int arcturus_set_performance_level(struct smu_context *smu, @@ -1559,437 +1585,6 @@ static int arcturus_set_performance_level(struct smu_context *smu, return smu_v11_0_set_performance_level(smu, level); } -static void arcturus_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - int i; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version); - - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - for (i = 0; i < PPT_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = %d\n", i, pptable->SocketPowerLimitAc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = %d\n", i, pptable->SocketPowerLimitAcTau[i]); - } - - dev_info(smu->adev->dev, "TdcLimitSoc = %d\n", pptable->TdcLimitSoc); - dev_info(smu->adev->dev, "TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau); - dev_info(smu->adev->dev, "TdcLimitGfx = %d\n", pptable->TdcLimitGfx); - dev_info(smu->adev->dev, "TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau); - - dev_info(smu->adev->dev, "TedgeLimit = %d\n", pptable->TedgeLimit); - dev_info(smu->adev->dev, "ThotspotLimit = %d\n", pptable->ThotspotLimit); - dev_info(smu->adev->dev, "TmemLimit = %d\n", pptable->TmemLimit); - dev_info(smu->adev->dev, "Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit); - dev_info(smu->adev->dev, "Tvr_memLimit = %d\n", pptable->Tvr_memLimit); - dev_info(smu->adev->dev, "Tvr_socLimit = %d\n", pptable->Tvr_socLimit); - dev_info(smu->adev->dev, "FitLimit = %d\n", pptable->FitLimit); - - dev_info(smu->adev->dev, "PpmPowerLimit = %d\n", pptable->PpmPowerLimit); - dev_info(smu->adev->dev, "PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold); - - dev_info(smu->adev->dev, "ThrottlerControlMask = %d\n", pptable->ThrottlerControlMask); - - dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx); - dev_info(smu->adev->dev, "UlvPadding = 0x%08x\n", pptable->UlvPadding); - - dev_info(smu->adev->dev, "UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass); - dev_info(smu->adev->dev, "Padding234[0] = 0x%02x\n", pptable->Padding234[0]); - dev_info(smu->adev->dev, "Padding234[1] = 0x%02x\n", pptable->Padding234[1]); - dev_info(smu->adev->dev, "Padding234[2] = 0x%02x\n", pptable->Padding234[2]); - - dev_info(smu->adev->dev, "MinVoltageGfx = %d\n", pptable->MinVoltageGfx); - dev_info(smu->adev->dev, "MinVoltageSoc = %d\n", pptable->MinVoltageSoc); - dev_info(smu->adev->dev, "MaxVoltageGfx = %d\n", 
pptable->MaxVoltageGfx); - dev_info(smu->adev->dev, "MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc); - - dev_info(smu->adev->dev, "LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx); - dev_info(smu->adev->dev, "LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc); - - dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK].padding, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK].padding, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].padding, - 
pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_UCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_UCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_FCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_FCLK].Padding16); - - - dev_info(smu->adev->dev, "FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableGfx[i]); - - dev_info(smu->adev->dev, "FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableVclk[i]); - - dev_info(smu->adev->dev, "FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableDclk[i]); - - dev_info(smu->adev->dev, "FreqTableSocclk\n"); - for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]); - - dev_info(smu->adev->dev, "FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableUclk[i]); - - dev_info(smu->adev->dev, "FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableFclk[i]); - - dev_info(smu->adev->dev, "Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->Mp0clkFreq[i]); - - dev_info(smu->adev->dev, "Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]); - - dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", 
pptable->GfxclkFidle); - dev_info(smu->adev->dev, "GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate); - dev_info(smu->adev->dev, "Padding567[0] = 0x%x\n", pptable->Padding567[0]); - dev_info(smu->adev->dev, "Padding567[1] = 0x%x\n", pptable->Padding567[1]); - dev_info(smu->adev->dev, "Padding567[2] = 0x%x\n", pptable->Padding567[2]); - dev_info(smu->adev->dev, "Padding567[3] = 0x%x\n", pptable->Padding567[3]); - dev_info(smu->adev->dev, "GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq); - dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource); - dev_info(smu->adev->dev, "Padding456 = 0x%x\n", pptable->Padding456); - - dev_info(smu->adev->dev, "EnableTdpm = %d\n", pptable->EnableTdpm); - dev_info(smu->adev->dev, "TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature); - dev_info(smu->adev->dev, "TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature); - dev_info(smu->adev->dev, "GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit); - - dev_info(smu->adev->dev, "FanStopTemp = %d\n", pptable->FanStopTemp); - dev_info(smu->adev->dev, "FanStartTemp = %d\n", pptable->FanStartTemp); - - dev_info(smu->adev->dev, "FanGainEdge = %d\n", pptable->FanGainEdge); - dev_info(smu->adev->dev, "FanGainHotspot = %d\n", pptable->FanGainHotspot); - dev_info(smu->adev->dev, "FanGainVrGfx = %d\n", pptable->FanGainVrGfx); - dev_info(smu->adev->dev, "FanGainVrSoc = %d\n", pptable->FanGainVrSoc); - dev_info(smu->adev->dev, "FanGainVrMem = %d\n", pptable->FanGainVrMem); - dev_info(smu->adev->dev, "FanGainHbm = %d\n", pptable->FanGainHbm); - - dev_info(smu->adev->dev, "FanPwmMin = %d\n", pptable->FanPwmMin); - dev_info(smu->adev->dev, "FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm); - dev_info(smu->adev->dev, "FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm); - dev_info(smu->adev->dev, "FanMaximumRpm = %d\n", pptable->FanMaximumRpm); - dev_info(smu->adev->dev, "FanTargetTemperature = %d\n", pptable->FanTargetTemperature); - dev_info(smu->adev->dev, "FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk); - dev_info(smu->adev->dev, "FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable); - dev_info(smu->adev->dev, "FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev); - dev_info(smu->adev->dev, "FanTempInputSelect = %d\n", pptable->FanTempInputSelect); - - dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved); - - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]); - dev_info(smu->adev->dev, "Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]); - - dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxPll.a, - pptable->dBtcGbGfxPll.b, - pptable->dBtcGbGfxPll.c); - dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxAfll.a, - pptable->dBtcGbGfxAfll.b, - pptable->dBtcGbGfxAfll.c); - dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - 
pptable->dBtcGbSoc.c); - - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "XgmiDpmPstates\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiDpmPstates[i]); - dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]); - dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]); - - dev_info(smu->adev->dev, "VDDGFX_TVmin = %d\n", pptable->VDDGFX_TVmin); - dev_info(smu->adev->dev, "VDDSOC_TVmin = %d\n", pptable->VDDSOC_TVmin); - dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = %d\n", pptable->VDDGFX_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = %d\n", pptable->VDDGFX_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = %d\n", pptable->VDDSOC_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = %d\n", pptable->VDDSOC_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = %d\n", pptable->VDDGFX_TVminHystersis); - dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = %d\n", pptable->VDDSOC_TVminHystersis); - - dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides); - dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - 
dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - dev_info(smu->adev->dev, "MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx); - dev_info(smu->adev->dev, "PaddingUlv = %d\n", pptable->PaddingUlv); - - dev_info(smu->adev->dev, "TotalPowerConfig = %d\n", pptable->TotalPowerConfig); - dev_info(smu->adev->dev, "TotalPowerSpare1 = %d\n", pptable->TotalPowerSpare1); - dev_info(smu->adev->dev, "TotalPowerSpare2 = %d\n", pptable->TotalPowerSpare2); - - dev_info(smu->adev->dev, "PccThresholdLow = %d\n", pptable->PccThresholdLow); - dev_info(smu->adev->dev, "PccThresholdHigh = %d\n", pptable->PccThresholdHigh); - - dev_info(smu->adev->dev, "Board Parameters:\n"); - dev_info(smu->adev->dev, "MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx); - dev_info(smu->adev->dev, "MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc); - - dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); - dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - dev_info(smu->adev->dev, "VddMemVrMapping = 0x%x\n", pptable->VddMemVrMapping); - dev_info(smu->adev->dev, "BoardVrMapping = 0x%x\n", pptable->BoardVrMapping); - - dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent); - - dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset); - dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset); - dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - dev_info(smu->adev->dev, "MemMaxCurrent = 0x%x\n", pptable->MemMaxCurrent); - dev_info(smu->adev->dev, "MemOffset = 0x%x\n", pptable->MemOffset); - dev_info(smu->adev->dev, "Padding_TelemetryMem = 0x%x\n", pptable->Padding_TelemetryMem); - - dev_info(smu->adev->dev, "BoardMaxCurrent = 0x%x\n", pptable->BoardMaxCurrent); - dev_info(smu->adev->dev, "BoardOffset = 0x%x\n", pptable->BoardOffset); - dev_info(smu->adev->dev, "Padding_TelemetryBoardInput = 0x%x\n", pptable->Padding_TelemetryBoardInput); - - dev_info(smu->adev->dev, "VR0HotGpio = %d\n", pptable->VR0HotGpio); - dev_info(smu->adev->dev, "VR0HotPolarity = %d\n", pptable->VR0HotPolarity); - dev_info(smu->adev->dev, "VR1HotGpio = %d\n", pptable->VR1HotGpio); - dev_info(smu->adev->dev, "VR1HotPolarity = %d\n", pptable->VR1HotPolarity); - - dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled); - dev_info(smu->adev->dev, "UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent); - dev_info(smu->adev->dev, "UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq); - - dev_info(smu->adev->dev, "FclkSpreadEnabled = %d\n", 
pptable->FclkSpreadEnabled); - dev_info(smu->adev->dev, "FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent); - dev_info(smu->adev->dev, "FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq); - - dev_info(smu->adev->dev, "FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq); - - for (i = 0; i < NUM_I2C_CONTROLLERS; i++) { - dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i); - dev_info(smu->adev->dev, " .Enabled = %d\n", - pptable->I2cControllers[i].Enabled); - dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - dev_info(smu->adev->dev, " .ControllerPort = %d\n", - pptable->I2cControllers[i].ControllerPort); - dev_info(smu->adev->dev, " .ControllerName = %d\n", - pptable->I2cControllers[i].ControllerName); - dev_info(smu->adev->dev, " .ThermalThrottler = %d\n", - pptable->I2cControllers[i].ThermalThrotter); - dev_info(smu->adev->dev, " .I2cProtocol = %d\n", - pptable->I2cControllers[i].I2cProtocol); - dev_info(smu->adev->dev, " .Speed = %d\n", - pptable->I2cControllers[i].Speed); - } - - dev_info(smu->adev->dev, "MemoryChannelEnabled = %d\n", pptable->MemoryChannelEnabled); - dev_info(smu->adev->dev, "DramBitWidth = %d\n", pptable->DramBitWidth); - - dev_info(smu->adev->dev, "TotalBoardPower = %d\n", pptable->TotalBoardPower); - - dev_info(smu->adev->dev, "XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]); - dev_info(smu->adev->dev, "XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]); - dev_info(smu->adev->dev, "XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]); - dev_info(smu->adev->dev, "XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]); - -} - static bool arcturus_is_dpm_running(struct smu_context *smu) { int ret = 0; @@ -2002,7 +1597,9 @@ static bool arcturus_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -2365,8 +1962,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .get_power_profile_mode = arcturus_get_power_profile_mode, .set_power_profile_mode = arcturus_set_power_profile_mode, .set_performance_level = arcturus_set_performance_level, - /* debug (internal used) */ - .dump_pptable = arcturus_dump_pptable, .get_power_limit = arcturus_get_power_limit, .is_dpm_running = arcturus_is_dpm_running, .dpm_set_vcn_enable = arcturus_dpm_set_vcn_enable, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 16af1a329621..7fad5dfb39c4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1135,7 +1135,9 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) return 0; } -static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int navi10_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { 
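Editor's note: each dpm_set_vcn_enable() backend in this series (arcturus above, navi10 here, sienna cichlid below) gains an `inst` argument so callers can gate individual VCN instances rather than all of them at once. A hypothetical caller shape, assuming the usual amdgpu convention that adev->vcn.num_vcn_inst holds the instance count; this wrapper is not part of the diff:

    /* Sketch: walk all VCN instances through the per-instance hook. */
    static int smu_enable_all_vcn(struct smu_context *smu, bool enable)
    {
            struct amdgpu_device *adev = smu->adev;
            int i, ret;

            for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                    ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, i);
                    if (ret)
                            return ret;
            }

            return 0;
    }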
int ret = 0; @@ -1689,7 +1691,7 @@ static int navi10_force_clk_levels(struct smu_context *smu, if (ret) return 0; - ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) return 0; break; @@ -2004,87 +2006,122 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) return size; } -static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +#define NAVI10_CUSTOM_PARAMS_COUNT 10 +#define NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT 3 +#define NAVI10_CUSTOM_PARAMS_SIZE (NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT * NAVI10_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int navi10_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffInt_t activity_monitor; - int workload_type, ret = 0; + int ret, idx; - smu->power_profile_mode = input[size]; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + idx = 0 * NAVI10_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor.Gfx_FPS = input[idx + 1]; + activity_monitor.Gfx_MinFreqStep = input[idx + 2]; + activity_monitor.Gfx_MinActiveFreqType = input[idx + 3]; + activity_monitor.Gfx_MinActiveFreq = input[idx + 4]; + activity_monitor.Gfx_BoosterFreqType = input[idx + 5]; + activity_monitor.Gfx_BoosterFreq = input[idx + 6]; + activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 1 * NAVI10_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Socclk */ + activity_monitor.Soc_FPS = input[idx + 1]; + activity_monitor.Soc_MinFreqStep = input[idx + 2]; + activity_monitor.Soc_MinActiveFreqType = input[idx + 3]; + activity_monitor.Soc_MinActiveFreq = input[idx + 4]; + activity_monitor.Soc_BoosterFreqType = input[idx + 5]; + activity_monitor.Soc_BoosterFreq = input[idx + 6]; + activity_monitor.Soc_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Soc_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Soc_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 2 * NAVI10_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Memclk */ + activity_monitor.Mem_FPS = input[idx + 1]; + activity_monitor.Mem_MinFreqStep = input[idx + 2]; + activity_monitor.Mem_MinActiveFreqType = input[idx + 3]; + activity_monitor.Mem_MinActiveFreq = input[idx + 4]; + activity_monitor.Mem_BoosterFreqType = input[idx + 5]; + activity_monitor.Mem_BoosterFreq = input[idx + 6]; + activity_monitor.Mem_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9]; + } + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 10) - return -EINVAL; + return ret; +} - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, 
WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor), false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } +static int navi10_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor.Gfx_FPS = input[1]; - activity_monitor.Gfx_MinFreqStep = input[2]; - activity_monitor.Gfx_MinActiveFreqType = input[3]; - activity_monitor.Gfx_MinActiveFreq = input[4]; - activity_monitor.Gfx_BoosterFreqType = input[5]; - activity_monitor.Gfx_BoosterFreq = input[6]; - activity_monitor.Gfx_PD_Data_limit_c = input[7]; - activity_monitor.Gfx_PD_Data_error_coeff = input[8]; - activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; - break; - case 1: /* Socclk */ - activity_monitor.Soc_FPS = input[1]; - activity_monitor.Soc_MinFreqStep = input[2]; - activity_monitor.Soc_MinActiveFreqType = input[3]; - activity_monitor.Soc_MinActiveFreq = input[4]; - activity_monitor.Soc_BoosterFreqType = input[5]; - activity_monitor.Soc_BoosterFreq = input[6]; - activity_monitor.Soc_PD_Data_limit_c = input[7]; - activity_monitor.Soc_PD_Data_error_coeff = input[8]; - activity_monitor.Soc_PD_Data_error_rate_coeff = input[9]; - break; - case 2: /* Memclk */ - activity_monitor.Mem_FPS = input[1]; - activity_monitor.Mem_MinFreqStep = input[2]; - activity_monitor.Mem_MinActiveFreqType = input[3]; - activity_monitor.Mem_MinActiveFreq = input[4]; - activity_monitor.Mem_BoosterFreqType = input[5]; - activity_monitor.Mem_BoosterFreq = input[6]; - activity_monitor.Mem_PD_Data_limit_c = input[7]; - activity_monitor.Mem_PD_Data_error_coeff = input[8]; - activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; - break; - default: - return -EINVAL; - } + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor), true); + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = kzalloc(NAVI10_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; + } + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != NAVI10_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT) + return -EINVAL; + idx = custom_params[0] * NAVI10_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = navi10_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, NAVI10_CUSTOM_PARAMS_SIZE); } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); - if (workload_type < 0) - return -EINVAL; ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); - if (ret) - dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + backend_workload_mask, NULL); + if (ret) { + 
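Editor's note: the new custom-profile plumbing replaces the one-shot `input[]` parse with a persistent per-device buffer of NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT blocks, each NAVI10_CUSTOM_PARAMS_COUNT longs wide: slot 0 of a block flags it as populated, slots 1..9 carry the nine activity-monitor coefficients, and the user request supplies the clock select in element 0 followed by the nine values. A standalone sketch of the indexing in plain C (macro names mirror the diff's; the function is illustrative):

    #include <string.h>

    #define PARAMS_COUNT  10  /* mirrors NAVI10_CUSTOM_PARAMS_COUNT */
    #define CLOCKS_COUNT   3  /* Gfxclk, Socclk, Memclk */

    /* Copy one request (clock select + 9 coefficients) into the
     * persistent buffer; returns the block base so a failed SMU
     * message can clear buf[idx] and disarm the block again. */
    static int stash_custom_params(long *buf, const long *user)
    {
            int idx;

            if (user[0] < 0 || user[0] >= CLOCKS_COUNT)
                    return -1;              /* the diff returns -EINVAL */

            idx = (int)user[0] * PARAMS_COUNT;      /* block base */
            buf[idx] = 1;                           /* mark populated */
            memcpy(&buf[idx + 1], &user[1],
                   (PARAMS_COUNT - 1) * sizeof(long));

            return idx;
    }

The memcpy is equivalent to the diff's element-by-element loop; because the buffer persists, a later non-custom request simply memset()s it back to zero, as the hunks above show.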
dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 9c3c48297cba..286777ada1df 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1152,7 +1152,9 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) return 0; } -static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { struct amdgpu_device *adev = smu->adev; int i, ret = 0; @@ -1469,7 +1471,7 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu, if (ret) goto forec_level_out; - ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) goto forec_level_out; break; @@ -1706,90 +1708,126 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char * return size; } -static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +#define SIENNA_CICHLID_CUSTOM_PARAMS_COUNT 10 +#define SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT 3 +#define SIENNA_CICHLID_CUSTOM_PARAMS_SIZE (SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int sienna_cichlid_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); - int workload_type, ret = 0; + int ret, idx; - smu->power_profile_mode = input[size]; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + idx = 0 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor->Gfx_FPS = input[idx + 1]; + activity_monitor->Gfx_MinFreqStep = input[idx + 2]; + activity_monitor->Gfx_MinActiveFreqType = input[idx + 3]; + activity_monitor->Gfx_MinActiveFreq = input[idx + 4]; + activity_monitor->Gfx_BoosterFreqType = input[idx + 5]; + activity_monitor->Gfx_BoosterFreq = input[idx + 6]; + activity_monitor->Gfx_PD_Data_limit_c = input[idx + 7]; + activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 8]; + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 1 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Socclk */ + activity_monitor->Fclk_FPS = input[idx + 1]; + activity_monitor->Fclk_MinFreqStep = input[idx + 2]; + activity_monitor->Fclk_MinActiveFreqType = input[idx + 3]; + activity_monitor->Fclk_MinActiveFreq = input[idx + 4]; + activity_monitor->Fclk_BoosterFreqType = input[idx + 5]; + activity_monitor->Fclk_BoosterFreq = input[idx + 6]; + activity_monitor->Fclk_PD_Data_limit_c = input[idx + 7]; + activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 8]; + 
activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 2 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Memclk */ + activity_monitor->Mem_FPS = input[idx + 1]; + activity_monitor->Mem_MinFreqStep = input[idx + 2]; + activity_monitor->Mem_MinActiveFreqType = input[idx + 3]; + activity_monitor->Mem_MinActiveFreq = input[idx + 4]; + activity_monitor->Mem_BoosterFreqType = input[idx + 5]; + activity_monitor->Mem_BoosterFreq = input[idx + 6]; + activity_monitor->Mem_PD_Data_limit_c = input[idx + 7]; + activity_monitor->Mem_PD_Data_error_coeff = input[idx + 8]; + activity_monitor->Mem_PD_Data_error_rate_coeff = input[idx + 9]; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 10) - return -EINVAL; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } + return ret; +} - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor->Gfx_FPS = input[1]; - activity_monitor->Gfx_MinFreqStep = input[2]; - activity_monitor->Gfx_MinActiveFreqType = input[3]; - activity_monitor->Gfx_MinActiveFreq = input[4]; - activity_monitor->Gfx_BoosterFreqType = input[5]; - activity_monitor->Gfx_BoosterFreq = input[6]; - activity_monitor->Gfx_PD_Data_limit_c = input[7]; - activity_monitor->Gfx_PD_Data_error_coeff = input[8]; - activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9]; - break; - case 1: /* Socclk */ - activity_monitor->Fclk_FPS = input[1]; - activity_monitor->Fclk_MinFreqStep = input[2]; - activity_monitor->Fclk_MinActiveFreqType = input[3]; - activity_monitor->Fclk_MinActiveFreq = input[4]; - activity_monitor->Fclk_BoosterFreqType = input[5]; - activity_monitor->Fclk_BoosterFreq = input[6]; - activity_monitor->Fclk_PD_Data_limit_c = input[7]; - activity_monitor->Fclk_PD_Data_error_coeff = input[8]; - activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9]; - break; - case 2: /* Memclk */ - activity_monitor->Mem_FPS = input[1]; - activity_monitor->Mem_MinFreqStep = input[2]; - activity_monitor->Mem_MinActiveFreqType = input[3]; - activity_monitor->Mem_MinActiveFreq = input[4]; - activity_monitor->Mem_BoosterFreqType = input[5]; - activity_monitor->Mem_BoosterFreq = input[6]; - activity_monitor->Mem_PD_Data_limit_c = input[7]; - activity_monitor->Mem_PD_Data_error_coeff = input[8]; - activity_monitor->Mem_PD_Data_error_rate_coeff = input[9]; - break; - default: - return -EINVAL; - } +static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), true); + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); + + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(SIENNA_CICHLID_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; + } + if (custom_params && 
custom_params_max_idx) { + if (custom_params_max_idx != SIENNA_CICHLID_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = sienna_cichlid_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, SIENNA_CICHLID_CUSTOM_PARAMS_SIZE); } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); - if (workload_type < 0) - return -EINVAL; ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); - if (ret) - dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + backend_workload_mask, NULL); + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } return ret; } @@ -2493,1274 +2531,6 @@ static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu) return val != 0x0; } -static void beige_goby_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_beige_goby_t *pptable = table_context->driver_pptable; - int i; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - for (i = 0; i < PPT_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = 0x%x\n", i, pptable->SocketPowerLimitAc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitAcTau[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDc[%d] = 0x%x\n", i, pptable->SocketPowerLimitDc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitDcTau[i]); - } - - for (i = 0; i < TDC_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "TdcLimit[%d] = 0x%x\n", i, pptable->TdcLimit[i]); - dev_info(smu->adev->dev, "TdcLimitTau[%d] = 0x%x\n", i, pptable->TdcLimitTau[i]); - } - - for (i = 0; i < TEMP_COUNT; i++) { - dev_info(smu->adev->dev, "TemperatureLimit[%d] = 0x%x\n", i, pptable->TemperatureLimit[i]); - } - - dev_info(smu->adev->dev, "FitLimit = 0x%x\n", pptable->FitLimit); - dev_info(smu->adev->dev, "TotalPowerConfig = 0x%x\n", pptable->TotalPowerConfig); - dev_info(smu->adev->dev, "TotalPowerPadding[0] = 0x%x\n", pptable->TotalPowerPadding[0]); - dev_info(smu->adev->dev, "TotalPowerPadding[1] = 0x%x\n", pptable->TotalPowerPadding[1]); - dev_info(smu->adev->dev, "TotalPowerPadding[2] = 0x%x\n", pptable->TotalPowerPadding[2]); - - dev_info(smu->adev->dev, "ApccPlusResidencyLimit = 0x%x\n", pptable->ApccPlusResidencyLimit); - for (i = 0; i < NUM_SMNCLK_DPM_LEVELS; i++) { - dev_info(smu->adev->dev, "SmnclkDpmFreq[%d] = 0x%x\n", i, pptable->SmnclkDpmFreq[i]); - dev_info(smu->adev->dev, "SmnclkDpmVoltage[%d] = 0x%x\n", i, 
pptable->SmnclkDpmVoltage[i]); - } - dev_info(smu->adev->dev, "ThrottlerControlMask = 0x%x\n", pptable->ThrottlerControlMask); - - dev_info(smu->adev->dev, "FwDStateMask = 0x%x\n", pptable->FwDStateMask); - - dev_info(smu->adev->dev, "UlvVoltageOffsetSoc = 0x%x\n", pptable->UlvVoltageOffsetSoc); - dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = 0x%x\n", pptable->UlvVoltageOffsetGfx); - dev_info(smu->adev->dev, "MinVoltageUlvGfx = 0x%x\n", pptable->MinVoltageUlvGfx); - dev_info(smu->adev->dev, "MinVoltageUlvSoc = 0x%x\n", pptable->MinVoltageUlvSoc); - - dev_info(smu->adev->dev, "SocLIVmin = 0x%x\n", pptable->SocLIVmin); - - dev_info(smu->adev->dev, "GceaLinkMgrIdleThreshold = 0x%x\n", pptable->GceaLinkMgrIdleThreshold); - - dev_info(smu->adev->dev, "MinVoltageGfx = 0x%x\n", pptable->MinVoltageGfx); - dev_info(smu->adev->dev, "MinVoltageSoc = 0x%x\n", pptable->MinVoltageSoc); - dev_info(smu->adev->dev, "MaxVoltageGfx = 0x%x\n", pptable->MaxVoltageGfx); - dev_info(smu->adev->dev, "MaxVoltageSoc = 0x%x\n", pptable->MaxVoltageSoc); - - dev_info(smu->adev->dev, "LoadLineResistanceGfx = 0x%x\n", pptable->LoadLineResistanceGfx); - dev_info(smu->adev->dev, "LoadLineResistanceSoc = 0x%x\n", pptable->LoadLineResistanceSoc); - - dev_info(smu->adev->dev, "VDDGFX_TVmin = 0x%x\n", pptable->VDDGFX_TVmin); - dev_info(smu->adev->dev, "VDDSOC_TVmin = 0x%x\n", pptable->VDDSOC_TVmin); - dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = 0x%x\n", pptable->VDDGFX_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = 0x%x\n", pptable->VDDGFX_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = 0x%x\n", pptable->VDDSOC_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = 0x%x\n", pptable->VDDSOC_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = 0x%x\n", pptable->VDDGFX_TVminHystersis); - dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = 0x%x\n", pptable->VDDSOC_TVminHystersis); - - dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - 
pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].Padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_UCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_UCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].Padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_FCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_FCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK_0]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK_0].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK_0].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK_0].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK_0].Padding, - pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK_0].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK_0]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK_0].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK_0].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK_0].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK_0].Padding, - pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.b, - 
pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK_0].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK_1]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK_1].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK_1].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK_1].Padding, - pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK_1].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK_1]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK_1].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK_1].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK_1].Padding, - pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK_1].Padding16); - - dev_info(smu->adev->dev, "FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableGfx[i]); - - dev_info(smu->adev->dev, "FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableVclk[i]); - - dev_info(smu->adev->dev, "FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableDclk[i]); - - dev_info(smu->adev->dev, "FreqTableSocclk\n"); - for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableSocclk[i]); - - dev_info(smu->adev->dev, "FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableUclk[i]); - - dev_info(smu->adev->dev, "FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableFclk[i]); - - dev_info(smu->adev->dev, "DcModeMaxFreq\n"); - dev_info(smu->adev->dev, " .PPCLK_GFXCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]); - dev_info(smu->adev->dev, " .PPCLK_SOCCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]); - dev_info(smu->adev->dev, " .PPCLK_UCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_UCLK]); - dev_info(smu->adev->dev, " .PPCLK_FCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_FCLK]); - dev_info(smu->adev->dev, " 
.PPCLK_DCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_0]); - dev_info(smu->adev->dev, " .PPCLK_VCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_0]); - dev_info(smu->adev->dev, " .PPCLK_DCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_1]); - dev_info(smu->adev->dev, " .PPCLK_VCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_1]); - - dev_info(smu->adev->dev, "FreqTableUclkDiv\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FreqTableUclkDiv[i]); - - dev_info(smu->adev->dev, "FclkBoostFreq = 0x%x\n", pptable->FclkBoostFreq); - dev_info(smu->adev->dev, "FclkParamPadding = 0x%x\n", pptable->FclkParamPadding); - - dev_info(smu->adev->dev, "Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0clkFreq[i]); - - dev_info(smu->adev->dev, "Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0DpmVoltage[i]); - - dev_info(smu->adev->dev, "MemVddciVoltage\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemVddciVoltage[i]); - - dev_info(smu->adev->dev, "MemMvddVoltage\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemMvddVoltage[i]); - - dev_info(smu->adev->dev, "GfxclkFgfxoffEntry = 0x%x\n", pptable->GfxclkFgfxoffEntry); - dev_info(smu->adev->dev, "GfxclkFinit = 0x%x\n", pptable->GfxclkFinit); - dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); - dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource); - dev_info(smu->adev->dev, "GfxclkPadding = 0x%x\n", pptable->GfxclkPadding); - - dev_info(smu->adev->dev, "GfxGpoSubFeatureMask = 0x%x\n", pptable->GfxGpoSubFeatureMask); - - dev_info(smu->adev->dev, "GfxGpoEnabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoEnabledWorkPolicyMask); - dev_info(smu->adev->dev, "GfxGpoDisabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoDisabledWorkPolicyMask); - dev_info(smu->adev->dev, "GfxGpoPadding[0] = 0x%x\n", pptable->GfxGpoPadding[0]); - dev_info(smu->adev->dev, "GfxGpoVotingAllow = 0x%x\n", pptable->GfxGpoVotingAllow); - dev_info(smu->adev->dev, "GfxGpoPadding32[0] = 0x%x\n", pptable->GfxGpoPadding32[0]); - dev_info(smu->adev->dev, "GfxGpoPadding32[1] = 0x%x\n", pptable->GfxGpoPadding32[1]); - dev_info(smu->adev->dev, "GfxGpoPadding32[2] = 0x%x\n", pptable->GfxGpoPadding32[2]); - dev_info(smu->adev->dev, "GfxGpoPadding32[3] = 0x%x\n", pptable->GfxGpoPadding32[3]); - dev_info(smu->adev->dev, "GfxDcsFopt = 0x%x\n", pptable->GfxDcsFopt); - dev_info(smu->adev->dev, "GfxDcsFclkFopt = 0x%x\n", pptable->GfxDcsFclkFopt); - dev_info(smu->adev->dev, "GfxDcsUclkFopt = 0x%x\n", pptable->GfxDcsUclkFopt); - - dev_info(smu->adev->dev, "DcsGfxOffVoltage = 0x%x\n", pptable->DcsGfxOffVoltage); - dev_info(smu->adev->dev, "DcsMinGfxOffTime = 0x%x\n", pptable->DcsMinGfxOffTime); - dev_info(smu->adev->dev, "DcsMaxGfxOffTime = 0x%x\n", pptable->DcsMaxGfxOffTime); - dev_info(smu->adev->dev, "DcsMinCreditAccum = 0x%x\n", pptable->DcsMinCreditAccum); - dev_info(smu->adev->dev, "DcsExitHysteresis = 0x%x\n", pptable->DcsExitHysteresis); - dev_info(smu->adev->dev, "DcsTimeout = 0x%x\n", pptable->DcsTimeout); - - dev_info(smu->adev->dev, "DcsParamPadding[0] = 0x%x\n", pptable->DcsParamPadding[0]); - dev_info(smu->adev->dev, "DcsParamPadding[1] = 0x%x\n", pptable->DcsParamPadding[1]); - dev_info(smu->adev->dev, "DcsParamPadding[2] = 
0x%x\n", pptable->DcsParamPadding[2]); - dev_info(smu->adev->dev, "DcsParamPadding[3] = 0x%x\n", pptable->DcsParamPadding[3]); - dev_info(smu->adev->dev, "DcsParamPadding[4] = 0x%x\n", pptable->DcsParamPadding[4]); - - dev_info(smu->adev->dev, "FlopsPerByteTable\n"); - for (i = 0; i < RLC_PACE_TABLE_NUM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FlopsPerByteTable[i]); - - dev_info(smu->adev->dev, "LowestUclkReservedForUlv = 0x%x\n", pptable->LowestUclkReservedForUlv); - dev_info(smu->adev->dev, "vddingMem[0] = 0x%x\n", pptable->PaddingMem[0]); - dev_info(smu->adev->dev, "vddingMem[1] = 0x%x\n", pptable->PaddingMem[1]); - dev_info(smu->adev->dev, "vddingMem[2] = 0x%x\n", pptable->PaddingMem[2]); - - dev_info(smu->adev->dev, "UclkDpmPstates\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->UclkDpmPstates[i]); - - dev_info(smu->adev->dev, "UclkDpmSrcFreqRange\n"); - dev_info(smu->adev->dev, " .Fmin = 0x%x\n", - pptable->UclkDpmSrcFreqRange.Fmin); - dev_info(smu->adev->dev, " .Fmax = 0x%x\n", - pptable->UclkDpmSrcFreqRange.Fmax); - dev_info(smu->adev->dev, "UclkDpmTargFreqRange\n"); - dev_info(smu->adev->dev, " .Fmin = 0x%x\n", - pptable->UclkDpmTargFreqRange.Fmin); - dev_info(smu->adev->dev, " .Fmax = 0x%x\n", - pptable->UclkDpmTargFreqRange.Fmax); - dev_info(smu->adev->dev, "UclkDpmMidstepFreq = 0x%x\n", pptable->UclkDpmMidstepFreq); - dev_info(smu->adev->dev, "UclkMidstepPadding = 0x%x\n", pptable->UclkMidstepPadding); - - dev_info(smu->adev->dev, "PcieGenSpeed\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieGenSpeed[i]); - - dev_info(smu->adev->dev, "PcieLaneCount\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieLaneCount[i]); - - dev_info(smu->adev->dev, "LclkFreq\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->LclkFreq[i]); - - dev_info(smu->adev->dev, "FanStopTemp = 0x%x\n", pptable->FanStopTemp); - dev_info(smu->adev->dev, "FanStartTemp = 0x%x\n", pptable->FanStartTemp); - - dev_info(smu->adev->dev, "FanGain\n"); - for (i = 0; i < TEMP_COUNT; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FanGain[i]); - - dev_info(smu->adev->dev, "FanPwmMin = 0x%x\n", pptable->FanPwmMin); - dev_info(smu->adev->dev, "FanAcousticLimitRpm = 0x%x\n", pptable->FanAcousticLimitRpm); - dev_info(smu->adev->dev, "FanThrottlingRpm = 0x%x\n", pptable->FanThrottlingRpm); - dev_info(smu->adev->dev, "FanMaximumRpm = 0x%x\n", pptable->FanMaximumRpm); - dev_info(smu->adev->dev, "MGpuFanBoostLimitRpm = 0x%x\n", pptable->MGpuFanBoostLimitRpm); - dev_info(smu->adev->dev, "FanTargetTemperature = 0x%x\n", pptable->FanTargetTemperature); - dev_info(smu->adev->dev, "FanTargetGfxclk = 0x%x\n", pptable->FanTargetGfxclk); - dev_info(smu->adev->dev, "FanPadding16 = 0x%x\n", pptable->FanPadding16); - dev_info(smu->adev->dev, "FanTempInputSelect = 0x%x\n", pptable->FanTempInputSelect); - dev_info(smu->adev->dev, "FanPadding = 0x%x\n", pptable->FanPadding); - dev_info(smu->adev->dev, "FanZeroRpmEnable = 0x%x\n", pptable->FanZeroRpmEnable); - dev_info(smu->adev->dev, "FanTachEdgePerRev = 0x%x\n", pptable->FanTachEdgePerRev); - - dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorRateSetDelta); - dev_info(smu->adev->dev, 
"FuzzyFan_PwmSetDelta = 0x%x\n", pptable->FuzzyFan_PwmSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_Reserved = 0x%x\n", pptable->FuzzyFan_Reserved); - - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "dBtcGbGfxDfllModelSelect = 0x%x\n", pptable->dBtcGbGfxDfllModelSelect); - dev_info(smu->adev->dev, "Padding8_Avfs = 0x%x\n", pptable->Padding8_Avfs); - - dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c); - dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxPll.a, - pptable->dBtcGbGfxPll.b, - pptable->dBtcGbGfxPll.c); - dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxDfll.a, - pptable->dBtcGbGfxDfll.b, - pptable->dBtcGbGfxDfll.c); - dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - pptable->dBtcGbSoc.c); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - dev_info(smu->adev->dev, "PiecewiseLinearDroopIntGfxDfll\n"); - for (i = 0; i < NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS; i++) { - dev_info(smu->adev->dev, " Fset[%d] = 0x%x\n", - i, pptable->PiecewiseLinearDroopIntGfxDfll.Fset[i]); - dev_info(smu->adev->dev, " Vdroop[%d] = 0x%x\n", - i, pptable->PiecewiseLinearDroopIntGfxDfll.Vdroop[i]); - } - - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - 
dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "XgmiDpmPstates\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiDpmPstates[i]); - dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]); - dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]); - - dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides); - dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - dev_info(smu->adev->dev, "SkuReserved[0] = 0x%x\n", pptable->SkuReserved[0]); - dev_info(smu->adev->dev, "SkuReserved[1] = 0x%x\n", pptable->SkuReserved[1]); - dev_info(smu->adev->dev, "SkuReserved[2] = 0x%x\n", pptable->SkuReserved[2]); - dev_info(smu->adev->dev, "SkuReserved[3] = 0x%x\n", pptable->SkuReserved[3]); - dev_info(smu->adev->dev, "SkuReserved[4] = 0x%x\n", pptable->SkuReserved[4]); - dev_info(smu->adev->dev, "SkuReserved[5] = 0x%x\n", pptable->SkuReserved[5]); - dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]); - dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]); - - dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]); - dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]); - dev_info(smu->adev->dev, "GamingClk[2] = 0x%x\n", pptable->GamingClk[2]); - dev_info(smu->adev->dev, "GamingClk[3] = 0x%x\n", pptable->GamingClk[3]); - dev_info(smu->adev->dev, "GamingClk[4] = 0x%x\n", pptable->GamingClk[4]); - dev_info(smu->adev->dev, "GamingClk[5] = 0x%x\n", pptable->GamingClk[5]); - - for (i = 0; i < NUM_I2C_CONTROLLERS; i++) { - dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i); - dev_info(smu->adev->dev, " .Enabled = 0x%x\n", - pptable->I2cControllers[i].Enabled); - dev_info(smu->adev->dev, " .Speed = 0x%x\n", - pptable->I2cControllers[i].Speed); - dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - dev_info(smu->adev->dev, " .ControllerPort = 0x%x\n", - pptable->I2cControllers[i].ControllerPort); - dev_info(smu->adev->dev, " .ControllerName = 0x%x\n", - pptable->I2cControllers[i].ControllerName); - dev_info(smu->adev->dev, " .ThermalThrottler = 0x%x\n", - pptable->I2cControllers[i].ThermalThrotter); - dev_info(smu->adev->dev, " .I2cProtocol = 0x%x\n", - pptable->I2cControllers[i].I2cProtocol); - dev_info(smu->adev->dev, " .PaddingConfig = 0x%x\n", - pptable->I2cControllers[i].PaddingConfig); - } - - dev_info(smu->adev->dev, "GpioScl = 0x%x\n", pptable->GpioScl); - dev_info(smu->adev->dev, "GpioSda = 0x%x\n", pptable->GpioSda); - 
dev_info(smu->adev->dev, "FchUsbPdSlaveAddr = 0x%x\n", pptable->FchUsbPdSlaveAddr); - dev_info(smu->adev->dev, "I2cSpare[0] = 0x%x\n", pptable->I2cSpare[0]); - - dev_info(smu->adev->dev, "Board Parameters:\n"); - dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); - dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - dev_info(smu->adev->dev, "VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping); - dev_info(smu->adev->dev, "VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping); - dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "VddciUlvPhaseSheddingMask = 0x%x\n", pptable->VddciUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "MvddUlvPhaseSheddingMask = 0x%x\n", pptable->MvddUlvPhaseSheddingMask); - - dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset); - dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset); - dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - dev_info(smu->adev->dev, "Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent); - dev_info(smu->adev->dev, "Mem0Offset = 0x%x\n", pptable->Mem0Offset); - dev_info(smu->adev->dev, "Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0); - - dev_info(smu->adev->dev, "Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent); - dev_info(smu->adev->dev, "Mem1Offset = 0x%x\n", pptable->Mem1Offset); - dev_info(smu->adev->dev, "Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1); - - dev_info(smu->adev->dev, "MvddRatio = 0x%x\n", pptable->MvddRatio); - - dev_info(smu->adev->dev, "AcDcGpio = 0x%x\n", pptable->AcDcGpio); - dev_info(smu->adev->dev, "AcDcPolarity = 0x%x\n", pptable->AcDcPolarity); - dev_info(smu->adev->dev, "VR0HotGpio = 0x%x\n", pptable->VR0HotGpio); - dev_info(smu->adev->dev, "VR0HotPolarity = 0x%x\n", pptable->VR0HotPolarity); - dev_info(smu->adev->dev, "VR1HotGpio = 0x%x\n", pptable->VR1HotGpio); - dev_info(smu->adev->dev, "VR1HotPolarity = 0x%x\n", pptable->VR1HotPolarity); - dev_info(smu->adev->dev, "GthrGpio = 0x%x\n", pptable->GthrGpio); - dev_info(smu->adev->dev, "GthrPolarity = 0x%x\n", pptable->GthrPolarity); - dev_info(smu->adev->dev, "LedPin0 = 0x%x\n", pptable->LedPin0); - dev_info(smu->adev->dev, "LedPin1 = 0x%x\n", pptable->LedPin1); - dev_info(smu->adev->dev, "LedPin2 = 0x%x\n", pptable->LedPin2); - dev_info(smu->adev->dev, "LedEnableMask = 0x%x\n", pptable->LedEnableMask); - dev_info(smu->adev->dev, "LedPcie = 0x%x\n", pptable->LedPcie); - dev_info(smu->adev->dev, "LedError = 0x%x\n", pptable->LedError); - dev_info(smu->adev->dev, "LedSpare1[0] = 0x%x\n", pptable->LedSpare1[0]); - dev_info(smu->adev->dev, "LedSpare1[1] = 0x%x\n", pptable->LedSpare1[1]); - - dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = 0x%x\n", pptable->PllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = 0x%x\n", pptable->PllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = 0x%x\n", pptable->PllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "DfllGfxclkSpreadEnabled = 0x%x\n", pptable->DfllGfxclkSpreadEnabled); - 
dev_info(smu->adev->dev, "DfllGfxclkSpreadPercent = 0x%x\n", pptable->DfllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "DfllGfxclkSpreadFreq = 0x%x\n", pptable->DfllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "UclkSpreadPadding = 0x%x\n", pptable->UclkSpreadPadding); - dev_info(smu->adev->dev, "UclkSpreadFreq = 0x%x\n", pptable->UclkSpreadFreq); - - dev_info(smu->adev->dev, "FclkSpreadEnabled = 0x%x\n", pptable->FclkSpreadEnabled); - dev_info(smu->adev->dev, "FclkSpreadPercent = 0x%x\n", pptable->FclkSpreadPercent); - dev_info(smu->adev->dev, "FclkSpreadFreq = 0x%x\n", pptable->FclkSpreadFreq); - - dev_info(smu->adev->dev, "MemoryChannelEnabled = 0x%x\n", pptable->MemoryChannelEnabled); - dev_info(smu->adev->dev, "DramBitWidth = 0x%x\n", pptable->DramBitWidth); - dev_info(smu->adev->dev, "PaddingMem1[0] = 0x%x\n", pptable->PaddingMem1[0]); - dev_info(smu->adev->dev, "PaddingMem1[1] = 0x%x\n", pptable->PaddingMem1[1]); - dev_info(smu->adev->dev, "PaddingMem1[2] = 0x%x\n", pptable->PaddingMem1[2]); - - dev_info(smu->adev->dev, "TotalBoardPower = 0x%x\n", pptable->TotalBoardPower); - dev_info(smu->adev->dev, "BoardPowerPadding = 0x%x\n", pptable->BoardPowerPadding); - - dev_info(smu->adev->dev, "XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkSpeed[i]); - dev_info(smu->adev->dev, "XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkWidth[i]); - dev_info(smu->adev->dev, "XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiFclkFreq[i]); - dev_info(smu->adev->dev, "XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiSocVoltage[i]); - - dev_info(smu->adev->dev, "HsrEnabled = 0x%x\n", pptable->HsrEnabled); - dev_info(smu->adev->dev, "VddqOffEnabled = 0x%x\n", pptable->VddqOffEnabled); - dev_info(smu->adev->dev, "PaddingUmcFlags[0] = 0x%x\n", pptable->PaddingUmcFlags[0]); - dev_info(smu->adev->dev, "PaddingUmcFlags[1] = 0x%x\n", pptable->PaddingUmcFlags[1]); - - dev_info(smu->adev->dev, "BoardReserved[0] = 0x%x\n", pptable->BoardReserved[0]); - dev_info(smu->adev->dev, "BoardReserved[1] = 0x%x\n", pptable->BoardReserved[1]); - dev_info(smu->adev->dev, "BoardReserved[2] = 0x%x\n", pptable->BoardReserved[2]); - dev_info(smu->adev->dev, "BoardReserved[3] = 0x%x\n", pptable->BoardReserved[3]); - dev_info(smu->adev->dev, "BoardReserved[4] = 0x%x\n", pptable->BoardReserved[4]); - dev_info(smu->adev->dev, "BoardReserved[5] = 0x%x\n", pptable->BoardReserved[5]); - dev_info(smu->adev->dev, "BoardReserved[6] = 0x%x\n", pptable->BoardReserved[6]); - dev_info(smu->adev->dev, "BoardReserved[7] = 0x%x\n", pptable->BoardReserved[7]); - dev_info(smu->adev->dev, "BoardReserved[8] = 0x%x\n", pptable->BoardReserved[8]); - dev_info(smu->adev->dev, "BoardReserved[9] = 0x%x\n", pptable->BoardReserved[9]); - dev_info(smu->adev->dev, "BoardReserved[10] = 0x%x\n", pptable->BoardReserved[10]); - - dev_info(smu->adev->dev, "MmHubPadding[0] = 0x%x\n", pptable->MmHubPadding[0]); - dev_info(smu->adev->dev, "MmHubPadding[1] = 0x%x\n", pptable->MmHubPadding[1]); - dev_info(smu->adev->dev, "MmHubPadding[2] = 0x%x\n", pptable->MmHubPadding[2]); - dev_info(smu->adev->dev, "MmHubPadding[3] = 0x%x\n", pptable->MmHubPadding[3]); - dev_info(smu->adev->dev, "MmHubPadding[4] = 0x%x\n", pptable->MmHubPadding[4]); - 
dev_info(smu->adev->dev, "MmHubPadding[5] = 0x%x\n", pptable->MmHubPadding[5]); - dev_info(smu->adev->dev, "MmHubPadding[6] = 0x%x\n", pptable->MmHubPadding[6]); - dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]); -} - -static void sienna_cichlid_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - int i; - - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == - IP_VERSION(11, 0, 13)) { - beige_goby_dump_pptable(smu); - return; - } - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - for (i = 0; i < PPT_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = 0x%x\n", i, pptable->SocketPowerLimitAc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitAcTau[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDc[%d] = 0x%x\n", i, pptable->SocketPowerLimitDc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitDcTau[i]); - } - - for (i = 0; i < TDC_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "TdcLimit[%d] = 0x%x\n", i, pptable->TdcLimit[i]); - dev_info(smu->adev->dev, "TdcLimitTau[%d] = 0x%x\n", i, pptable->TdcLimitTau[i]); - } - - for (i = 0; i < TEMP_COUNT; i++) { - dev_info(smu->adev->dev, "TemperatureLimit[%d] = 0x%x\n", i, pptable->TemperatureLimit[i]); - } - - dev_info(smu->adev->dev, "FitLimit = 0x%x\n", pptable->FitLimit); - dev_info(smu->adev->dev, "TotalPowerConfig = 0x%x\n", pptable->TotalPowerConfig); - dev_info(smu->adev->dev, "TotalPowerPadding[0] = 0x%x\n", pptable->TotalPowerPadding[0]); - dev_info(smu->adev->dev, "TotalPowerPadding[1] = 0x%x\n", pptable->TotalPowerPadding[1]); - dev_info(smu->adev->dev, "TotalPowerPadding[2] = 0x%x\n", pptable->TotalPowerPadding[2]); - - dev_info(smu->adev->dev, "ApccPlusResidencyLimit = 0x%x\n", pptable->ApccPlusResidencyLimit); - for (i = 0; i < NUM_SMNCLK_DPM_LEVELS; i++) { - dev_info(smu->adev->dev, "SmnclkDpmFreq[%d] = 0x%x\n", i, pptable->SmnclkDpmFreq[i]); - dev_info(smu->adev->dev, "SmnclkDpmVoltage[%d] = 0x%x\n", i, pptable->SmnclkDpmVoltage[i]); - } - dev_info(smu->adev->dev, "ThrottlerControlMask = 0x%x\n", pptable->ThrottlerControlMask); - - dev_info(smu->adev->dev, "FwDStateMask = 0x%x\n", pptable->FwDStateMask); - - dev_info(smu->adev->dev, "UlvVoltageOffsetSoc = 0x%x\n", pptable->UlvVoltageOffsetSoc); - dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = 0x%x\n", pptable->UlvVoltageOffsetGfx); - dev_info(smu->adev->dev, "MinVoltageUlvGfx = 0x%x\n", pptable->MinVoltageUlvGfx); - dev_info(smu->adev->dev, "MinVoltageUlvSoc = 0x%x\n", pptable->MinVoltageUlvSoc); - - dev_info(smu->adev->dev, "SocLIVmin = 0x%x\n", pptable->SocLIVmin); - dev_info(smu->adev->dev, "PaddingLIVmin = 0x%x\n", pptable->PaddingLIVmin); - - dev_info(smu->adev->dev, "GceaLinkMgrIdleThreshold = 0x%x\n", pptable->GceaLinkMgrIdleThreshold); - dev_info(smu->adev->dev, "paddingRlcUlvParams[0] = 0x%x\n", pptable->paddingRlcUlvParams[0]); - dev_info(smu->adev->dev, "paddingRlcUlvParams[1] = 0x%x\n", pptable->paddingRlcUlvParams[1]); - dev_info(smu->adev->dev, "paddingRlcUlvParams[2] = 0x%x\n", pptable->paddingRlcUlvParams[2]); - - dev_info(smu->adev->dev, "MinVoltageGfx = 0x%x\n", 
pptable->MinVoltageGfx); - dev_info(smu->adev->dev, "MinVoltageSoc = 0x%x\n", pptable->MinVoltageSoc); - dev_info(smu->adev->dev, "MaxVoltageGfx = 0x%x\n", pptable->MaxVoltageGfx); - dev_info(smu->adev->dev, "MaxVoltageSoc = 0x%x\n", pptable->MaxVoltageSoc); - - dev_info(smu->adev->dev, "LoadLineResistanceGfx = 0x%x\n", pptable->LoadLineResistanceGfx); - dev_info(smu->adev->dev, "LoadLineResistanceSoc = 0x%x\n", pptable->LoadLineResistanceSoc); - - dev_info(smu->adev->dev, "VDDGFX_TVmin = 0x%x\n", pptable->VDDGFX_TVmin); - dev_info(smu->adev->dev, "VDDSOC_TVmin = 0x%x\n", pptable->VDDSOC_TVmin); - dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = 0x%x\n", pptable->VDDGFX_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = 0x%x\n", pptable->VDDGFX_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = 0x%x\n", pptable->VDDSOC_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = 0x%x\n", pptable->VDDSOC_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = 0x%x\n", pptable->VDDGFX_TVminHystersis); - dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = 0x%x\n", pptable->VDDSOC_TVminHystersis); - - dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].Padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - 
pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_UCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_UCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].Padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_FCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_FCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK_0]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK_0].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK_0].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK_0].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK_0].Padding, - pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK_0].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK_0]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK_0].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK_0].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK_0].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK_0].Padding, - pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK_0].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK_1]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK_1].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK_1].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK_1].Padding, - 
pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK_1].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK_1]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK_1].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK_1].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK_1].Padding, - pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK_1].Padding16); - - dev_info(smu->adev->dev, "FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableGfx[i]); - - dev_info(smu->adev->dev, "FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableVclk[i]); - - dev_info(smu->adev->dev, "FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableDclk[i]); - - dev_info(smu->adev->dev, "FreqTableSocclk\n"); - for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableSocclk[i]); - - dev_info(smu->adev->dev, "FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableUclk[i]); - - dev_info(smu->adev->dev, "FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableFclk[i]); - - dev_info(smu->adev->dev, "DcModeMaxFreq\n"); - dev_info(smu->adev->dev, " .PPCLK_GFXCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]); - dev_info(smu->adev->dev, " .PPCLK_SOCCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]); - dev_info(smu->adev->dev, " .PPCLK_UCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_UCLK]); - dev_info(smu->adev->dev, " .PPCLK_FCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_FCLK]); - dev_info(smu->adev->dev, " .PPCLK_DCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_0]); - dev_info(smu->adev->dev, " .PPCLK_VCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_0]); - dev_info(smu->adev->dev, " .PPCLK_DCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_1]); - dev_info(smu->adev->dev, " .PPCLK_VCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_1]); - - dev_info(smu->adev->dev, "FreqTableUclkDiv\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FreqTableUclkDiv[i]); - - dev_info(smu->adev->dev, "FclkBoostFreq = 0x%x\n", pptable->FclkBoostFreq); - dev_info(smu->adev->dev, "FclkParamPadding = 0x%x\n", pptable->FclkParamPadding); - - dev_info(smu->adev->dev, "Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - 
dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0clkFreq[i]); - - dev_info(smu->adev->dev, "Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0DpmVoltage[i]); - - dev_info(smu->adev->dev, "MemVddciVoltage\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemVddciVoltage[i]); - - dev_info(smu->adev->dev, "MemMvddVoltage\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemMvddVoltage[i]); - - dev_info(smu->adev->dev, "GfxclkFgfxoffEntry = 0x%x\n", pptable->GfxclkFgfxoffEntry); - dev_info(smu->adev->dev, "GfxclkFinit = 0x%x\n", pptable->GfxclkFinit); - dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); - dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource); - dev_info(smu->adev->dev, "GfxclkPadding = 0x%x\n", pptable->GfxclkPadding); - - dev_info(smu->adev->dev, "GfxGpoSubFeatureMask = 0x%x\n", pptable->GfxGpoSubFeatureMask); - - dev_info(smu->adev->dev, "GfxGpoEnabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoEnabledWorkPolicyMask); - dev_info(smu->adev->dev, "GfxGpoDisabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoDisabledWorkPolicyMask); - dev_info(smu->adev->dev, "GfxGpoPadding[0] = 0x%x\n", pptable->GfxGpoPadding[0]); - dev_info(smu->adev->dev, "GfxGpoVotingAllow = 0x%x\n", pptable->GfxGpoVotingAllow); - dev_info(smu->adev->dev, "GfxGpoPadding32[0] = 0x%x\n", pptable->GfxGpoPadding32[0]); - dev_info(smu->adev->dev, "GfxGpoPadding32[1] = 0x%x\n", pptable->GfxGpoPadding32[1]); - dev_info(smu->adev->dev, "GfxGpoPadding32[2] = 0x%x\n", pptable->GfxGpoPadding32[2]); - dev_info(smu->adev->dev, "GfxGpoPadding32[3] = 0x%x\n", pptable->GfxGpoPadding32[3]); - dev_info(smu->adev->dev, "GfxDcsFopt = 0x%x\n", pptable->GfxDcsFopt); - dev_info(smu->adev->dev, "GfxDcsFclkFopt = 0x%x\n", pptable->GfxDcsFclkFopt); - dev_info(smu->adev->dev, "GfxDcsUclkFopt = 0x%x\n", pptable->GfxDcsUclkFopt); - - dev_info(smu->adev->dev, "DcsGfxOffVoltage = 0x%x\n", pptable->DcsGfxOffVoltage); - dev_info(smu->adev->dev, "DcsMinGfxOffTime = 0x%x\n", pptable->DcsMinGfxOffTime); - dev_info(smu->adev->dev, "DcsMaxGfxOffTime = 0x%x\n", pptable->DcsMaxGfxOffTime); - dev_info(smu->adev->dev, "DcsMinCreditAccum = 0x%x\n", pptable->DcsMinCreditAccum); - dev_info(smu->adev->dev, "DcsExitHysteresis = 0x%x\n", pptable->DcsExitHysteresis); - dev_info(smu->adev->dev, "DcsTimeout = 0x%x\n", pptable->DcsTimeout); - - dev_info(smu->adev->dev, "DcsParamPadding[0] = 0x%x\n", pptable->DcsParamPadding[0]); - dev_info(smu->adev->dev, "DcsParamPadding[1] = 0x%x\n", pptable->DcsParamPadding[1]); - dev_info(smu->adev->dev, "DcsParamPadding[2] = 0x%x\n", pptable->DcsParamPadding[2]); - dev_info(smu->adev->dev, "DcsParamPadding[3] = 0x%x\n", pptable->DcsParamPadding[3]); - dev_info(smu->adev->dev, "DcsParamPadding[4] = 0x%x\n", pptable->DcsParamPadding[4]); - - dev_info(smu->adev->dev, "FlopsPerByteTable\n"); - for (i = 0; i < RLC_PACE_TABLE_NUM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FlopsPerByteTable[i]); - - dev_info(smu->adev->dev, "LowestUclkReservedForUlv = 0x%x\n", pptable->LowestUclkReservedForUlv); - dev_info(smu->adev->dev, "vddingMem[0] = 0x%x\n", pptable->PaddingMem[0]); - dev_info(smu->adev->dev, "vddingMem[1] = 0x%x\n", pptable->PaddingMem[1]); - dev_info(smu->adev->dev, "vddingMem[2] = 0x%x\n", pptable->PaddingMem[2]); - - dev_info(smu->adev->dev, "UclkDpmPstates\n"); - 
for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->UclkDpmPstates[i]); - - dev_info(smu->adev->dev, "UclkDpmSrcFreqRange\n"); - dev_info(smu->adev->dev, " .Fmin = 0x%x\n", - pptable->UclkDpmSrcFreqRange.Fmin); - dev_info(smu->adev->dev, " .Fmax = 0x%x\n", - pptable->UclkDpmSrcFreqRange.Fmax); - dev_info(smu->adev->dev, "UclkDpmTargFreqRange\n"); - dev_info(smu->adev->dev, " .Fmin = 0x%x\n", - pptable->UclkDpmTargFreqRange.Fmin); - dev_info(smu->adev->dev, " .Fmax = 0x%x\n", - pptable->UclkDpmTargFreqRange.Fmax); - dev_info(smu->adev->dev, "UclkDpmMidstepFreq = 0x%x\n", pptable->UclkDpmMidstepFreq); - dev_info(smu->adev->dev, "UclkMidstepPadding = 0x%x\n", pptable->UclkMidstepPadding); - - dev_info(smu->adev->dev, "PcieGenSpeed\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieGenSpeed[i]); - - dev_info(smu->adev->dev, "PcieLaneCount\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieLaneCount[i]); - - dev_info(smu->adev->dev, "LclkFreq\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->LclkFreq[i]); - - dev_info(smu->adev->dev, "FanStopTemp = 0x%x\n", pptable->FanStopTemp); - dev_info(smu->adev->dev, "FanStartTemp = 0x%x\n", pptable->FanStartTemp); - - dev_info(smu->adev->dev, "FanGain\n"); - for (i = 0; i < TEMP_COUNT; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FanGain[i]); - - dev_info(smu->adev->dev, "FanPwmMin = 0x%x\n", pptable->FanPwmMin); - dev_info(smu->adev->dev, "FanAcousticLimitRpm = 0x%x\n", pptable->FanAcousticLimitRpm); - dev_info(smu->adev->dev, "FanThrottlingRpm = 0x%x\n", pptable->FanThrottlingRpm); - dev_info(smu->adev->dev, "FanMaximumRpm = 0x%x\n", pptable->FanMaximumRpm); - dev_info(smu->adev->dev, "MGpuFanBoostLimitRpm = 0x%x\n", pptable->MGpuFanBoostLimitRpm); - dev_info(smu->adev->dev, "FanTargetTemperature = 0x%x\n", pptable->FanTargetTemperature); - dev_info(smu->adev->dev, "FanTargetGfxclk = 0x%x\n", pptable->FanTargetGfxclk); - dev_info(smu->adev->dev, "FanPadding16 = 0x%x\n", pptable->FanPadding16); - dev_info(smu->adev->dev, "FanTempInputSelect = 0x%x\n", pptable->FanTempInputSelect); - dev_info(smu->adev->dev, "FanPadding = 0x%x\n", pptable->FanPadding); - dev_info(smu->adev->dev, "FanZeroRpmEnable = 0x%x\n", pptable->FanZeroRpmEnable); - dev_info(smu->adev->dev, "FanTachEdgePerRev = 0x%x\n", pptable->FanTachEdgePerRev); - - dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorRateSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = 0x%x\n", pptable->FuzzyFan_PwmSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_Reserved = 0x%x\n", pptable->FuzzyFan_Reserved); - - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "dBtcGbGfxDfllModelSelect = 0x%x\n", pptable->dBtcGbGfxDfllModelSelect); - dev_info(smu->adev->dev, "Padding8_Avfs = 0x%x\n", pptable->Padding8_Avfs); - - dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, 
"qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c); - dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxPll.a, - pptable->dBtcGbGfxPll.b, - pptable->dBtcGbGfxPll.c); - dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxDfll.a, - pptable->dBtcGbGfxDfll.b, - pptable->dBtcGbGfxDfll.c); - dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - pptable->dBtcGbSoc.c); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - dev_info(smu->adev->dev, "PiecewiseLinearDroopIntGfxDfll\n"); - for (i = 0; i < NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS; i++) { - dev_info(smu->adev->dev, " Fset[%d] = 0x%x\n", - i, pptable->PiecewiseLinearDroopIntGfxDfll.Fset[i]); - dev_info(smu->adev->dev, " Vdroop[%d] = 0x%x\n", - i, pptable->PiecewiseLinearDroopIntGfxDfll.Vdroop[i]); - } - - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "XgmiDpmPstates\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiDpmPstates[i]); - dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]); - dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]); - - dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides); - dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - 
pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - dev_info(smu->adev->dev, "SkuReserved[0] = 0x%x\n", pptable->SkuReserved[0]); - dev_info(smu->adev->dev, "SkuReserved[1] = 0x%x\n", pptable->SkuReserved[1]); - dev_info(smu->adev->dev, "SkuReserved[2] = 0x%x\n", pptable->SkuReserved[2]); - dev_info(smu->adev->dev, "SkuReserved[3] = 0x%x\n", pptable->SkuReserved[3]); - dev_info(smu->adev->dev, "SkuReserved[4] = 0x%x\n", pptable->SkuReserved[4]); - dev_info(smu->adev->dev, "SkuReserved[5] = 0x%x\n", pptable->SkuReserved[5]); - dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]); - dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]); - - dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]); - dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]); - dev_info(smu->adev->dev, "GamingClk[2] = 0x%x\n", pptable->GamingClk[2]); - dev_info(smu->adev->dev, "GamingClk[3] = 0x%x\n", pptable->GamingClk[3]); - dev_info(smu->adev->dev, "GamingClk[4] = 0x%x\n", pptable->GamingClk[4]); - dev_info(smu->adev->dev, "GamingClk[5] = 0x%x\n", pptable->GamingClk[5]); - - for (i = 0; i < NUM_I2C_CONTROLLERS; i++) { - dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i); - dev_info(smu->adev->dev, " .Enabled = 0x%x\n", - pptable->I2cControllers[i].Enabled); - dev_info(smu->adev->dev, " .Speed = 0x%x\n", - pptable->I2cControllers[i].Speed); - dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - dev_info(smu->adev->dev, " .ControllerPort = 0x%x\n", - pptable->I2cControllers[i].ControllerPort); - dev_info(smu->adev->dev, " .ControllerName = 0x%x\n", - pptable->I2cControllers[i].ControllerName); - dev_info(smu->adev->dev, " .ThermalThrottler = 0x%x\n", - pptable->I2cControllers[i].ThermalThrotter); - dev_info(smu->adev->dev, " .I2cProtocol = 0x%x\n", - pptable->I2cControllers[i].I2cProtocol); - dev_info(smu->adev->dev, " .PaddingConfig = 0x%x\n", - pptable->I2cControllers[i].PaddingConfig); - } - - dev_info(smu->adev->dev, "GpioScl = 0x%x\n", pptable->GpioScl); - dev_info(smu->adev->dev, "GpioSda = 0x%x\n", pptable->GpioSda); - dev_info(smu->adev->dev, "FchUsbPdSlaveAddr = 0x%x\n", pptable->FchUsbPdSlaveAddr); - dev_info(smu->adev->dev, "I2cSpare[0] = 0x%x\n", pptable->I2cSpare[0]); - - dev_info(smu->adev->dev, "Board Parameters:\n"); - dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); - dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - dev_info(smu->adev->dev, "VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping); - dev_info(smu->adev->dev, "VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping); - dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "VddciUlvPhaseSheddingMask = 0x%x\n", 
pptable->VddciUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "MvddUlvPhaseSheddingMask = 0x%x\n", pptable->MvddUlvPhaseSheddingMask); - - dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset); - dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset); - dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - dev_info(smu->adev->dev, "Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent); - dev_info(smu->adev->dev, "Mem0Offset = 0x%x\n", pptable->Mem0Offset); - dev_info(smu->adev->dev, "Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0); - - dev_info(smu->adev->dev, "Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent); - dev_info(smu->adev->dev, "Mem1Offset = 0x%x\n", pptable->Mem1Offset); - dev_info(smu->adev->dev, "Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1); - - dev_info(smu->adev->dev, "MvddRatio = 0x%x\n", pptable->MvddRatio); - - dev_info(smu->adev->dev, "AcDcGpio = 0x%x\n", pptable->AcDcGpio); - dev_info(smu->adev->dev, "AcDcPolarity = 0x%x\n", pptable->AcDcPolarity); - dev_info(smu->adev->dev, "VR0HotGpio = 0x%x\n", pptable->VR0HotGpio); - dev_info(smu->adev->dev, "VR0HotPolarity = 0x%x\n", pptable->VR0HotPolarity); - dev_info(smu->adev->dev, "VR1HotGpio = 0x%x\n", pptable->VR1HotGpio); - dev_info(smu->adev->dev, "VR1HotPolarity = 0x%x\n", pptable->VR1HotPolarity); - dev_info(smu->adev->dev, "GthrGpio = 0x%x\n", pptable->GthrGpio); - dev_info(smu->adev->dev, "GthrPolarity = 0x%x\n", pptable->GthrPolarity); - dev_info(smu->adev->dev, "LedPin0 = 0x%x\n", pptable->LedPin0); - dev_info(smu->adev->dev, "LedPin1 = 0x%x\n", pptable->LedPin1); - dev_info(smu->adev->dev, "LedPin2 = 0x%x\n", pptable->LedPin2); - dev_info(smu->adev->dev, "LedEnableMask = 0x%x\n", pptable->LedEnableMask); - dev_info(smu->adev->dev, "LedPcie = 0x%x\n", pptable->LedPcie); - dev_info(smu->adev->dev, "LedError = 0x%x\n", pptable->LedError); - dev_info(smu->adev->dev, "LedSpare1[0] = 0x%x\n", pptable->LedSpare1[0]); - dev_info(smu->adev->dev, "LedSpare1[1] = 0x%x\n", pptable->LedSpare1[1]); - - dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = 0x%x\n", pptable->PllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = 0x%x\n", pptable->PllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = 0x%x\n", pptable->PllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "DfllGfxclkSpreadEnabled = 0x%x\n", pptable->DfllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "DfllGfxclkSpreadPercent = 0x%x\n", pptable->DfllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "DfllGfxclkSpreadFreq = 0x%x\n", pptable->DfllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "UclkSpreadPadding = 0x%x\n", pptable->UclkSpreadPadding); - dev_info(smu->adev->dev, "UclkSpreadFreq = 0x%x\n", pptable->UclkSpreadFreq); - - dev_info(smu->adev->dev, "FclkSpreadEnabled = 0x%x\n", pptable->FclkSpreadEnabled); - dev_info(smu->adev->dev, "FclkSpreadPercent = 0x%x\n", pptable->FclkSpreadPercent); - dev_info(smu->adev->dev, "FclkSpreadFreq = 0x%x\n", pptable->FclkSpreadFreq); - - dev_info(smu->adev->dev, "MemoryChannelEnabled = 0x%x\n", pptable->MemoryChannelEnabled); - dev_info(smu->adev->dev, "DramBitWidth = 0x%x\n", pptable->DramBitWidth); - dev_info(smu->adev->dev, "PaddingMem1[0] 
= 0x%x\n", pptable->PaddingMem1[0]); - dev_info(smu->adev->dev, "PaddingMem1[1] = 0x%x\n", pptable->PaddingMem1[1]); - dev_info(smu->adev->dev, "PaddingMem1[2] = 0x%x\n", pptable->PaddingMem1[2]); - - dev_info(smu->adev->dev, "TotalBoardPower = 0x%x\n", pptable->TotalBoardPower); - dev_info(smu->adev->dev, "BoardPowerPadding = 0x%x\n", pptable->BoardPowerPadding); - - dev_info(smu->adev->dev, "XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkSpeed[i]); - dev_info(smu->adev->dev, "XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkWidth[i]); - dev_info(smu->adev->dev, "XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiFclkFreq[i]); - dev_info(smu->adev->dev, "XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiSocVoltage[i]); - - dev_info(smu->adev->dev, "HsrEnabled = 0x%x\n", pptable->HsrEnabled); - dev_info(smu->adev->dev, "VddqOffEnabled = 0x%x\n", pptable->VddqOffEnabled); - dev_info(smu->adev->dev, "PaddingUmcFlags[0] = 0x%x\n", pptable->PaddingUmcFlags[0]); - dev_info(smu->adev->dev, "PaddingUmcFlags[1] = 0x%x\n", pptable->PaddingUmcFlags[1]); - - dev_info(smu->adev->dev, "BoardReserved[0] = 0x%x\n", pptable->BoardReserved[0]); - dev_info(smu->adev->dev, "BoardReserved[1] = 0x%x\n", pptable->BoardReserved[1]); - dev_info(smu->adev->dev, "BoardReserved[2] = 0x%x\n", pptable->BoardReserved[2]); - dev_info(smu->adev->dev, "BoardReserved[3] = 0x%x\n", pptable->BoardReserved[3]); - dev_info(smu->adev->dev, "BoardReserved[4] = 0x%x\n", pptable->BoardReserved[4]); - dev_info(smu->adev->dev, "BoardReserved[5] = 0x%x\n", pptable->BoardReserved[5]); - dev_info(smu->adev->dev, "BoardReserved[6] = 0x%x\n", pptable->BoardReserved[6]); - dev_info(smu->adev->dev, "BoardReserved[7] = 0x%x\n", pptable->BoardReserved[7]); - dev_info(smu->adev->dev, "BoardReserved[8] = 0x%x\n", pptable->BoardReserved[8]); - dev_info(smu->adev->dev, "BoardReserved[9] = 0x%x\n", pptable->BoardReserved[9]); - dev_info(smu->adev->dev, "BoardReserved[10] = 0x%x\n", pptable->BoardReserved[10]); - - dev_info(smu->adev->dev, "MmHubPadding[0] = 0x%x\n", pptable->MmHubPadding[0]); - dev_info(smu->adev->dev, "MmHubPadding[1] = 0x%x\n", pptable->MmHubPadding[1]); - dev_info(smu->adev->dev, "MmHubPadding[2] = 0x%x\n", pptable->MmHubPadding[2]); - dev_info(smu->adev->dev, "MmHubPadding[3] = 0x%x\n", pptable->MmHubPadding[3]); - dev_info(smu->adev->dev, "MmHubPadding[4] = 0x%x\n", pptable->MmHubPadding[4]); - dev_info(smu->adev->dev, "MmHubPadding[5] = 0x%x\n", pptable->MmHubPadding[5]); - dev_info(smu->adev->dev, "MmHubPadding[6] = 0x%x\n", pptable->MmHubPadding[6]); - dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]); -} - static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msg, int num_msgs) { @@ -4397,7 +3167,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .display_disable_memory_clock_switch = sienna_cichlid_display_disable_memory_clock_switch, .get_power_limit = sienna_cichlid_get_power_limit, .update_pcie_parameters = sienna_cichlid_update_pcie_parameters, - .dump_pptable = sienna_cichlid_dump_pptable, .init_microcode = smu_v11_0_init_microcode, .load_microcode = smu_v11_0_load_microcode, .fini_microcode = smu_v11_0_fini_microcode, diff 
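The smu_v11_0.c hunks below thread a new `automatic` argument through smu_v11_0_set_soft_freq_limited_range(). The SetSoftMaxByFreq/SetSoftMinByFreq message packs the clock ID into bits 31:16 and a frequency into bits 15:0; automatic mode substitutes the in-band sentinels 0xffff (max) and 0 (min) so the firmware picks the limits itself, and set_performance_level() only sets the flag for AMD_DPM_FORCED_LEVEL_AUTO. A minimal standalone sketch of that payload encoding; the helper name and the demo clock ID are invented for illustration and are not part of the driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the soft-limit message payload used in the hunks below:
 * clock ID in bits 31:16, frequency (MHz) in bits 15:0. Automatic
 * mode sends 0xffff for the max and 0 for the min. */
static uint32_t soft_freq_param(uint32_t clk_id, uint32_t freq_mhz,
				bool automatic, bool is_max)
{
	uint16_t payload;

	if (automatic)
		payload = is_max ? 0xffff : 0x0;
	else
		payload = (uint16_t)(freq_mhz & 0xffff);

	return (clk_id << 16) | payload;
}

int main(void)
{
	/* Fixed 1800 MHz cap vs. firmware-chosen limits on clock ID 2. */
	printf("fixed max: 0x%08x\n", (unsigned)soft_freq_param(2, 1800, false, true));
	printf("auto max:  0x%08x\n", (unsigned)soft_freq_param(2, 0, true, true));
	printf("auto min:  0x%08x\n", (unsigned)soft_freq_param(2, 0, true, false));
	return 0;
}

Encoding the automatic case in-band keeps the two existing SMU messages sufficient for both fixed and firmware-chosen limits.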
--git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 16fcd9dcd202..480cf3cb204d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1616,7 +1616,8 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) break; default: if (!ras || !adev->ras_enabled || - adev->gmc.xgmi.pending_reset) { + (adev->init_lvl->level == + AMDGPU_INIT_LEVEL_MINIMAL_XGMI)) { if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) { data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT); @@ -1763,7 +1764,8 @@ failed: int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, - uint32_t max) + uint32_t max, + bool automatic) { int ret = 0, clk_id = 0; uint32_t param; @@ -1778,7 +1780,10 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0xffff); + else + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, param, NULL); if (ret) @@ -1786,7 +1791,10 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, } if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0); + else + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, param, NULL); if (ret) @@ -1854,6 +1862,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, uint32_t mclk_min = 0, mclk_max = 0; uint32_t socclk_min = 0, socclk_max = 0; int ret = 0; + bool auto_level = false; switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1873,6 +1882,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, mclk_max = mem_table->max; socclk_min = soc_table->min; socclk_max = soc_table->max; + auto_level = true; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; @@ -1905,13 +1915,15 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) { mclk_min = mclk_max = 0; socclk_min = socclk_max = 0; + auto_level = false; } if (sclk_min && sclk_max) { ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, sclk_min, - sclk_max); + sclk_max, + auto_level); if (ret) return ret; } @@ -1920,7 +1932,8 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_MCLK, mclk_min, - mclk_max); + mclk_max, + auto_level); if (ret) return ret; } @@ -1929,7 +1942,8 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + auto_level); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 22737b11b1bf..a55ea76d7399 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -242,7 +242,9 @@ static int vangogh_tables_init(struct smu_context *smu) goto err0_out; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2)); + smu_table->gpu_metrics_table_size = sizeof(struct 
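gpu_metrics_v2_2);

The vangogh_tables_init() hunk above sizes a single buffer to the largest of the v2_2, v2_3 and v2_4 gpu-metrics layouts by chaining max() over the struct sizes, so one kzalloc() can back whichever metrics version is later produced. A compile-time sketch of the same sizing pattern; the struct layouts here are invented stand-ins, not the real gpu_metrics definitions:

#include <stddef.h>
#include <stdio.h>

struct metrics_v2_2 { unsigned int fields[16]; };
struct metrics_v2_3 { unsigned int fields[16]; unsigned int extra[4]; };
struct metrics_v2_4 { unsigned int fields[16]; unsigned int extra[8]; };

#define MAX_SZ(a, b) ((a) > (b) ? (a) : (b))

/* One allocation big enough for any supported layout, mirroring the
 * chained max() in the hunk above. */
static const size_t metrics_buf_size =
	MAX_SZ(sizeof(struct metrics_v2_4),
	       MAX_SZ(sizeof(struct metrics_v2_3),
		      sizeof(struct metrics_v2_2)));

int main(void)
{
	printf("metrics buffer: %zu bytes\n", metrics_buf_size);
	return 0;
}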
+ smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3)); + smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4)); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) goto err1_out; @@ -459,7 +461,9 @@ static int vangogh_init_smc_tables(struct smu_context *smu) return smu_v11_0_init_smc_tables(smu); } -static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -1052,48 +1056,34 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu, return size; } -static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +static int vangogh_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) { - int workload_type, ret; - uint32_t profile_mode = input[size]; + u32 backend_workload_mask = 0; + int ret; - if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); - return -EINVAL; - } - - if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || - profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) - return 0; - - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - profile_mode); - if (workload_type < 0) { - dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n", - profile_mode); - return -EINVAL; - } + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, - NULL); + backend_workload_mask, + NULL); if (ret) { - dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", - workload_type); + dev_err_once(smu->adev->dev, "Fail to set workload mask 0x%08x\n", + workload_mask); return ret; } - smu->power_profile_mode = profile_mode; - - return 0; + return ret; } static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { int ret = 0; @@ -1299,7 +1289,7 @@ static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest) return ret; force_freq = highest ?
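max_freq : min_freq;

The vangogh_set_power_profile_mode() rewrite above replaces the one-profile-at-a-time lookup (smu_cmn_to_asic_specific_index() plus 1 << workload_type) with a mask interface: smu_cmn_get_backend_workload_mask() translates the generic workload_mask into the firmware's bit positions, and the whole mask is sent in a single ActiveProcessNotify message, so several active profiles can be expressed at once. A standalone sketch of that translation step; the workload bit numbering and the mapping table are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Invented generic workload bits, one per power-profile type. */
enum { WL_FULLSCREEN3D, WL_VIDEO, WL_COMPUTE, WL_COUNT };

/* Invented per-ASIC table: generic bit index -> firmware bit index. */
static const unsigned int asic_bit[WL_COUNT] = { 0, 3, 5 };

/* Fold every set generic bit into the firmware's layout, in the
 * spirit of smu_cmn_get_backend_workload_mask(). */
static uint32_t backend_workload_mask(uint32_t generic_mask)
{
	uint32_t backend = 0;
	unsigned int i;

	for (i = 0; i < WL_COUNT; i++)
		if (generic_mask & (1u << i))
			backend |= 1u << asic_bit[i];

	return backend;
}

int main(void)
{
	/* 3D and compute requested together, something the old
	 * single-index interface could not express. */
	uint32_t mask = (1u << WL_FULLSCREEN3D) | (1u << WL_COMPUTE);

	printf("backend mask: 0x%08x\n", (unsigned)backend_workload_mask(mask));
	return 0;
}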
- ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq); + ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false); if (ret) return ret; } @@ -1335,7 +1325,7 @@ static int vangogh_unforce_dpm_levels(struct smu_context *smu) if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) return ret; @@ -1354,7 +1344,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq); + ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq, false); if (ret) return ret; @@ -1362,7 +1352,7 @@ if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq); + ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq, false); if (ret) return ret; @@ -1370,7 +1360,7 @@ if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq); + ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq, false); if (ret) return ret; @@ -1378,7 +1368,7 @@ if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq); + ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq, false); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index cc0504b063fa..37d82a71a2d7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -645,7 +645,9 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context return pm_type; } -static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int renoir_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -707,7 +709,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest) return ret; force_freq = highest ?
- ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq); + ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false); if (ret) return ret; } @@ -740,7 +742,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) { if (ret) return ret; - ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) return ret; } @@ -862,44 +864,27 @@ static int renoir_force_clk_levels(struct smu_context *smu, return ret; } -static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +static int renoir_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) { - int workload_type, ret; - uint32_t profile_mode = input[size]; + int ret; + u32 backend_workload_mask = 0; - if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); - return -EINVAL; - } - - if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || - profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) - return 0; - - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - profile_mode); - if (workload_type < 0) { - /* - * TODO: If some case need switch to powersave/default power mode - * then can consider enter WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving. - */ - dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on RENOIR\n", profile_mode); - return -EINVAL; - } + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, - NULL); + backend_workload_mask, + NULL); if (ret) { - dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type); + dev_err_once(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); return ret; } - smu->power_profile_mode = profile_mode; - - return 0; + return ret; } static int renoir_set_peak_clock_by_device(struct smu_context *smu) @@ -911,7 +896,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false); if (ret) return ret; @@ -919,7 +904,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false); if (ret) return ret; @@ -961,13 +946,13 @@ static int renior_set_dpm_profile_freq(struct smu_context *smu, } if (sclk) - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk, sclk); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk, sclk, false); if (socclk) - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk, socclk); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk, socclk, false); if (fclk) - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk, fclk); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk, fclk, false); return ret; }
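Alongside the workload-mask conversion, renoir_dpm_set_vcn_enable() and vangogh_dpm_set_vcn_enable() above both gain an `inst` parameter, letting the core power a single VCN instance up or down instead of all instances at once. A minimal sketch of that per-instance callback shape; the instance count and state array are invented, and the real callbacks issue SMU messages rather than flip a flag:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define VCN_INST_COUNT 2

static bool vcn_powered[VCN_INST_COUNT];

/* Per-instance enable, mirroring the new (smu, enable, inst) shape. */
static int dpm_set_vcn_enable(bool enable, int inst)
{
	if (inst < 0 || inst >= VCN_INST_COUNT)
		return -EINVAL;

	if (vcn_powered[inst] == enable)
		return 0;		/* already in the requested state */

	vcn_powered[inst] = enable;
	return 0;
}

int main(void)
{
	dpm_set_vcn_enable(true, 0);	/* power up instance 0 only */
	printf("inst0=%d inst1=%d\n", vcn_powered[0], vcn_powered[1]);
	return 0;
}

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c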
b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c index ed15f5a0fd11..3d3cd546f0ad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c @@ -211,7 +211,7 @@ int smu_v12_0_mode2_reset(struct smu_context *smu) } int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max) + uint32_t min, uint32_t max, bool automatic) { int ret = 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 2c35eb31475a..f6b029354327 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1297,9 +1297,10 @@ static int aldebaran_set_performance_level(struct smu_context *smu, } static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; @@ -1328,7 +1329,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, return 0; ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, - min, max); + min, max, false); if (!ret) { pstate_table->gfxclk_pstate.curr.min = min; pstate_table->gfxclk_pstate.curr.max = max; @@ -1348,7 +1349,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, /* Restore default min/max clocks and enable determinism */ min_clk = dpm_context->dpm_tables.gfx_table.min; max_clk = dpm_context->dpm_tables.gfx_table.max; - ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false); if (!ret) { usleep_range(500, 1000); ret = smu_cmn_send_smc_msg_with_param(smu, @@ -1422,7 +1423,7 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_ min_clk = dpm_context->dpm_tables.gfx_table.min; max_clk = dpm_context->dpm_tables.gfx_table.max; - return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false); } break; case PP_OD_COMMIT_DPM_TABLE: @@ -1441,7 +1442,7 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_ min_clk = pstate_table->gfxclk_pstate.custom.min; max_clk = pstate_table->gfxclk_pstate.custom.max; - return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false); } break; default: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index e17466cc1952..2bfea740dace 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -1608,7 +1608,8 @@ failed: int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, - uint32_t max) + uint32_t max, + bool automatic) { int ret = 0, clk_id = 0; uint32_t param; @@ -1623,7 +1624,10 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0xffff); + else + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret 
= smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, param, NULL); if (ret) @@ -1631,7 +1635,10 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, } if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0); + else + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, param, NULL); if (ret) @@ -1708,6 +1715,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, uint32_t dclk_min = 0, dclk_max = 0; uint32_t fclk_min = 0, fclk_max = 0; int ret = 0, i; + bool auto_level = false; switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1739,6 +1747,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, dclk_max = dclk_table->max; fclk_min = fclk_table->min; fclk_max = fclk_table->max; + auto_level = true; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; @@ -1780,13 +1789,15 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, vclk_min = vclk_max = 0; dclk_min = dclk_max = 0; fclk_min = fclk_max = 0; + auto_level = false; } if (sclk_min && sclk_max) { ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, sclk_min, - sclk_max); + sclk_max, + auto_level); if (ret) return ret; @@ -1798,7 +1809,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_MCLK, mclk_min, - mclk_max); + mclk_max, + auto_level); if (ret) return ret; @@ -1810,7 +1822,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + auto_level); if (ret) return ret; @@ -1825,7 +1838,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, i ? SMU_VCLK1 : SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + auto_level); if (ret) return ret; } @@ -1840,7 +1854,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, i ? 
SMU_DCLK1 : SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + auto_level); if (ret) return ret; } @@ -1852,7 +1867,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_min, - fclk_max); + fclk_max, + auto_level); if (ret) return ret; @@ -2088,7 +2104,8 @@ int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu) } int smu_v13_0_set_vcn_enable(struct smu_context *smu, - bool enable) + bool enable, + int inst) { struct amdgpu_device *adev = smu->adev; int i, ret = 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 1d024b122b0c..3aa705aae4c0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -107,6 +107,8 @@ #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 +#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11 +#define PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP 12 #define LINK_SPEED_MAX 3 @@ -736,19 +738,6 @@ static bool smu_v13_0_0_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static void smu_v13_0_0_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - SkuTable_t *skutable = &pptable->SkuTable; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); -} - static int smu_v13_0_0_system_features_control(struct smu_context *smu, bool en) { @@ -1143,6 +1132,14 @@ static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu, od_min_setting = overdrive_lowerlimits->FanMinimumPwm; od_max_setting = overdrive_upperlimits->FanMinimumPwm; break; + case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE: + od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable; + od_max_setting = overdrive_upperlimits->FanZeroRpmEnable; + break; + case PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP: + od_min_setting = overdrive_lowerlimits->FanZeroRpmStopTemp; + od_max_setting = overdrive_upperlimits->FanZeroRpmStopTemp; + break; default: od_min_setting = od_max_setting = INT_MAX; break; @@ -1463,6 +1460,42 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, min_value, max_value); break; + case SMU_OD_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_0_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmEnable); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_0_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_STOP_TEMPERATURE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmStopTemp); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_0_get_od_setting_limits(smu, + 
PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_STOP_TEMPERATURE: %u %u\n", + min_value, max_value); + break; + case SMU_OD_RANGE: if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && !smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && @@ -1560,6 +1593,16 @@ static int smu_v13_0_0_od_restore_table_single(struct smu_context *smu, long inp od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + od_table->OverDriveTable.FanZeroRpmEnable = + boot_overdrive_table->OverDriveTable.FanZeroRpmEnable; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + od_table->OverDriveTable.FanZeroRpmStopTemp = + boot_overdrive_table->OverDriveTable.FanZeroRpmStopTemp; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; default: dev_info(adev->dev, "Invalid table index: %ld\n", input); return -EINVAL; @@ -1853,6 +1896,48 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmEnable = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM stop temperature setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmStopTemp = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_RESTORE_DEFAULT_TABLE: if (size == 1) { ret = smu_v13_0_0_od_restore_table_single(smu, input[0]); @@ -1975,7 +2060,8 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, clk_type, min_freq, - max_freq); + max_freq, + false); break; case SMU_DCEFCLK: case SMU_PCIE: @@ -2122,7 +2208,11 @@ static void smu_v13_0_0_set_supported_od_feature_mask(struct smu_context *smu) OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | - OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET; } static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu) @@ -2188,6 +2278,10 @@ static int 
smu_v13_0_0_set_default_od_settings(struct smu_context *smu) user_od_table_bak.OverDriveTable.FanTargetTemperature; user_od_table->OverDriveTable.FanMinimumPwm = user_od_table_bak.OverDriveTable.FanMinimumPwm; + user_od_table->OverDriveTable.FanZeroRpmEnable = + user_od_table_bak.OverDriveTable.FanZeroRpmEnable; + user_od_table->OverDriveTable.FanZeroRpmStopTemp = + user_od_table_bak.OverDriveTable.FanZeroRpmStopTemp; } smu_v13_0_0_set_supported_od_feature_mask(smu); @@ -2477,104 +2571,129 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu, return size; } -static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, - long *input, - uint32_t size) +#define SMU_13_0_0_CUSTOM_PARAMS_COUNT 9 +#define SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT 2 +#define SMU_13_0_0_CUSTOM_PARAMS_SIZE (SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_0_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int smu_v13_0_0_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); - int workload_type, ret = 0; - u32 workload_mask; - - smu->power_profile_mode = input[size]; + int ret, idx; - if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 9) - return -EINVAL; - - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), - false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } - - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor->Gfx_FPS = input[1]; - activity_monitor->Gfx_MinActiveFreqType = input[2]; - activity_monitor->Gfx_MinActiveFreq = input[3]; - activity_monitor->Gfx_BoosterFreqType = input[4]; - activity_monitor->Gfx_BoosterFreq = input[5]; - activity_monitor->Gfx_PD_Data_limit_c = input[6]; - activity_monitor->Gfx_PD_Data_error_coeff = input[7]; - activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; - break; - case 1: /* Fclk */ - activity_monitor->Fclk_FPS = input[1]; - activity_monitor->Fclk_MinActiveFreqType = input[2]; - activity_monitor->Fclk_MinActiveFreq = input[3]; - activity_monitor->Fclk_BoosterFreqType = input[4]; - activity_monitor->Fclk_BoosterFreq = input[5]; - activity_monitor->Fclk_PD_Data_limit_c = input[6]; - activity_monitor->Fclk_PD_Data_error_coeff = input[7]; - activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; - break; - default: - return -EINVAL; - } + idx = 0 * SMU_13_0_0_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor->Gfx_FPS = input[idx + 1]; + activity_monitor->Gfx_MinActiveFreqType = input[idx + 2]; + activity_monitor->Gfx_MinActiveFreq = input[idx + 3]; + activity_monitor->Gfx_BoosterFreqType = input[idx + 4]; + activity_monitor->Gfx_BoosterFreq = input[idx + 5]; + activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6]; + activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7]; + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8]; 
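The hunk above replaces the old switch on input[0] with a single flattened parameter buffer that persists in smu->custom_profile_params. A minimal C sketch of the layout implied by the SMU_13_0_0_CUSTOM_PARAMS_* macros; the custom_row() helper is illustrative and not part of the patch:

    /* Row-major layout: one row of SMU_13_0_0_CUSTOM_PARAMS_COUNT (9) longs
     * per clock type (row 0: Gfxclk, row 1: Fclk). Slot 0 of a row is a
     * "row populated" flag; slots 1..8 carry the activity-monitor
     * coefficients in the order they are consumed above. */
    static long *custom_row(long *params, long clock_type)
    {
            return &params[clock_type * SMU_13_0_0_CUSTOM_PARAMS_COUNT];
    }

This is also why SMU_13_0_0_CUSTOM_PARAMS_SIZE is CLOCK_COUNT * PARAMS_COUNT * sizeof(long): both rows are retained so a later SetWorkloadMask can replay previously entered custom coefficients.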
+ } + idx = 1 * SMU_13_0_0_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Fclk */ + activity_monitor->Fclk_FPS = input[idx + 1]; + activity_monitor->Fclk_MinActiveFreqType = input[idx + 2]; + activity_monitor->Fclk_MinActiveFreq = input[idx + 3]; + activity_monitor->Fclk_BoosterFreqType = input[idx + 4]; + activity_monitor->Fclk_BoosterFreq = input[idx + 5]; + activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6]; + activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7]; + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8]; + } - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), - true); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); - return ret; - } + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); + return ret; +} - if (workload_type < 0) - return -EINVAL; +static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int workload_type, ret, idx = -1, i; - workload_mask = 1 << workload_type; + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */ - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) { - if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && - ((smu->adev->pm.fw_version == 0x004e6601) || - (smu->adev->pm.fw_version >= 0x004e7300))) || - (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && - smu->adev->pm.fw_version >= 0x00504500)) { - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - PP_SMC_POWER_PROFILE_POWERSAVING); - if (workload_type >= 0) - workload_mask |= 1 << workload_type; + if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && + ((smu->adev->pm.fw_version == 0x004e6601) || + (smu->adev->pm.fw_version >= 0x004e7300))) || + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && + smu->adev->pm.fw_version >= 0x00504500)) { + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_POWERSAVING); + if (workload_type >= 0) + backend_workload_mask |= 1 << workload_type; + } + + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(SMU_13_0_0_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; } + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != SMU_13_0_0_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * SMU_13_0_0_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = smu_v13_0_0_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); + if (ret) { + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return 
ret; + } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, SMU_13_0_0_CUSTOM_PARAMS_SIZE); } ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - workload_mask, - NULL); - if (!ret) - smu->workload_mask = workload_mask; + SMU_MSG_SetWorkloadMask, + backend_workload_mask, + NULL); + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } return ret; } @@ -3026,7 +3145,6 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .i2c_init = smu_v13_0_0_i2c_control_init, .i2c_fini = smu_v13_0_0_i2c_control_fini, .is_dpm_running = smu_v13_0_0_is_dpm_running, - .dump_pptable = smu_v13_0_0_dump_pptable, .init_microcode = smu_v13_0_init_microcode, .load_microcode = smu_v13_0_load_microcode, .fini_microcode = smu_v13_0_fini_microcode, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 9c2c43bfed0b..f5db181ef489 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -193,7 +193,9 @@ static int smu_v13_0_5_system_features_control(struct smu_context *smu, bool en) return ret; } -static int smu_v13_0_5_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int smu_v13_0_5_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -811,9 +813,10 @@ failed: } static int smu_v13_0_5_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { enum smu_message_type msg_set_min, msg_set_max; uint32_t min_clk = min; @@ -950,7 +953,7 @@ static int smu_v13_0_5_force_clk_levels(struct smu_context *smu, if (ret) goto force_level_out; - ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) goto force_level_out; break; @@ -1046,9 +1049,10 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, if (sclk_min && sclk_max) { ret = smu_v13_0_5_set_soft_freq_limited_range(smu, - SMU_SCLK, - sclk_min, - sclk_max); + SMU_SCLK, + sclk_min, + sclk_max, + false); if (ret) return ret; @@ -1060,7 +1064,8 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, ret = smu_v13_0_5_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + false); if (ret) return ret; } @@ -1069,7 +1074,8 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, ret = smu_v13_0_5_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + false); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 55ed6247eb61..ab3c93ddce46 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -96,12 +96,29 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin"); #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5 #define LINK_SPEED_MAX 4 - #define SMU_13_0_6_DSCLK_THRESHOLD 140 #define MCA_BANK_IPID(_ip, _hwid, _type) \ [AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, } +static inline bool smu_v13_0_6_is_unified_metrics(struct 
smu_context *smu) +{ + return (smu->adev->flags & AMD_IS_APU) && + smu->smc_fw_version <= 0x4556900; +} + +static inline bool smu_v13_0_6_is_other_end_count_available(struct smu_context *smu) +{ + switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 6): + return smu->smc_fw_version >= 0x557600; + case IP_VERSION(13, 0, 14): + return smu->smc_fw_version >= 0x05550E00; + default: + return false; + } +} + struct mca_bank_ipid { enum amdgpu_mca_ip ip; uint16_t hwid; @@ -253,7 +270,7 @@ struct PPTable_t { #define SMUQ10_TO_UINT(x) ((x) >> 10) #define SMUQ10_FRAC(x) ((x) & 0x3ff) #define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200)) -#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\ +#define GET_METRIC_FIELD(field, flag) ((flag) ?\ (metrics_a->field) : (metrics_x->field)) struct smu_v13_0_6_dpm_map { @@ -352,7 +369,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) return -ENOMEM; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_7); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) { @@ -583,7 +600,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table; struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; - struct amdgpu_device *adev = smu->adev; + bool flag = smu_v13_0_6_is_unified_metrics(smu); int ret, i, retry = 100; uint32_t table_version; @@ -595,7 +612,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) return ret; /* Ensure that metrics have been updated */ - if (GET_METRIC_FIELD(AccumulationCounter)) + if (GET_METRIC_FIELD(AccumulationCounter, flag)) break; usleep_range(1000, 1100); @@ -612,29 +629,29 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) table_version; pptable->MaxSocketPowerLimit = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, flag)); pptable->MaxGfxclkFrequency = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, flag)); pptable->MinGfxclkFrequency = - SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency)); + SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, flag)); for (i = 0; i < 4; ++i) { pptable->FclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, flag)[i]); pptable->UclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, flag)[i]); pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND( - GET_METRIC_FIELD(SocclkFrequencyTable)[i]); + GET_METRIC_FIELD(SocclkFrequencyTable, flag)[i]); pptable->VclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, flag)[i]); pptable->DclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, flag)[i]); pptable->LclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, flag)[i]); } /* use AID0 serial number by default */ - pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0]; + pptable->PublicSerialNumber_AID = 
GET_METRIC_FIELD(PublicSerialNumber_AID, flag)[0]; pptable->Init = true; } @@ -957,6 +974,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, struct smu_table_context *smu_table = &smu->smu_table; MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table; MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table; + bool flag = smu_v13_0_6_is_unified_metrics(smu); struct amdgpu_device *adev = smu->adev; int ret = 0; int xcc_id; @@ -971,50 +989,50 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, case METRICS_AVERAGE_GFXCLK: if (smu->smc_fw_version >= 0x552F00) { xcc_id = GET_INST(GC, 0); - *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]); } else { *value = 0; } break; case METRICS_CURR_SOCCLK: case METRICS_AVERAGE_SOCCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[0]); break; case METRICS_CURR_UCLK: case METRICS_AVERAGE_UCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag)); break; case METRICS_CURR_VCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[0]); break; case METRICS_CURR_DCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[0]); break; case METRICS_CURR_FCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, flag)); break; case METRICS_AVERAGE_GFXACTIVITY: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag)); break; case METRICS_AVERAGE_MEMACTIVITY: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag)); break; case METRICS_CURR_SOCKETPOWER: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8; + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag)) << 8; break; case METRICS_TEMPERATURE_HOTSPOT: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; case METRICS_TEMPERATURE_MEM: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; /* This is the max of all VRs and not just SOC VR. * No need to define another data type for the same. 
*/ case METRICS_TEMPERATURE_VRSOC: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; default: @@ -1739,7 +1757,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu, if (uclk_table->max != pstate_table->uclk_pstate.curr.max) { /* Min UCLK is not expected to be changed */ ret = smu_v13_0_set_soft_freq_limited_range( - smu, SMU_UCLK, 0, uclk_table->max); + smu, SMU_UCLK, 0, uclk_table->max, false); if (ret) return ret; pstate_table->uclk_pstate.curr.max = uclk_table->max; @@ -1758,7 +1776,8 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu, static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max) + uint32_t min, uint32_t max, + bool automatic) { struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; @@ -1806,7 +1825,7 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, return -EOPNOTSUPP; /* Only max clock limiting is allowed for UCLK */ ret = smu_v13_0_set_soft_freq_limited_range( - smu, SMU_UCLK, 0, max); + smu, SMU_UCLK, 0, max, false); if (!ret) pstate_table->uclk_pstate.curr.max = max; } @@ -1946,7 +1965,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, max_clk = dpm_context->dpm_tables.gfx_table.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_GFXCLK, min_clk, max_clk); + smu, SMU_GFXCLK, min_clk, max_clk, false); if (ret) return ret; @@ -1954,7 +1973,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, min_clk = dpm_context->dpm_tables.uclk_table.min; max_clk = dpm_context->dpm_tables.uclk_table.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_UCLK, min_clk, max_clk); + smu, SMU_UCLK, min_clk, max_clk, false); if (ret) return ret; pstate_table->uclk_pstate.custom.max = 0; @@ -1978,7 +1997,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, max_clk = pstate_table->gfxclk_pstate.custom.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_GFXCLK, min_clk, max_clk); + smu, SMU_GFXCLK, min_clk, max_clk, false); if (ret) return ret; @@ -1989,7 +2008,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, min_clk = pstate_table->uclk_pstate.curr.min; max_clk = pstate_table->uclk_pstate.custom.max; return smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_UCLK, min_clk, max_clk); + smu, SMU_UCLK, min_clk, max_clk, false); } break; default: @@ -2299,14 +2318,18 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu) static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table) { + bool per_inst, smu_13_0_6_per_inst, smu_13_0_14_per_inst, apu_per_inst; struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_5 *gpu_metrics = - (struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table; + struct gpu_metrics_v1_7 *gpu_metrics = + (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table; + bool flag = smu_v13_0_6_is_unified_metrics(smu); + int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; - int ret = 0, xcc_id, inst, i, j; MetricsTableX_t *metrics_x; MetricsTableA_t *metrics_a; + struct amdgpu_xcp *xcp; u16 link_width_level; + u32 inst_mask; metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL); ret = 
smu_v13_0_6_get_metrics_table(smu, metrics_x, true); @@ -2317,53 +2340,60 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table metrics_a = (MetricsTableA_t *)metrics_x; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5); + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7); gpu_metrics->temperature_hotspot = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag)); /* Individual HBM stack temperature is not reported */ gpu_metrics->temperature_mem = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag)); /* Reports max temperature of all voltage rails */ gpu_metrics->temperature_vrsoc = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag)); gpu_metrics->average_gfx_activity = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag)); gpu_metrics->average_umc_activity = - SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization)); + SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag)); gpu_metrics->curr_socket_power = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag)); /* Energy counter reported in 15.259uJ (2^-16) units */ - gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc); + gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, flag); for (i = 0; i < MAX_GFX_CLKS; i++) { xcc_id = GET_INST(GC, i); if (xcc_id >= 0) gpu_metrics->current_gfxclk[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]); + SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]); if (i < MAX_CLKS) { gpu_metrics->current_socclk[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[i]); inst = GET_INST(VCN, i); if (inst >= 0) { gpu_metrics->current_vclk0[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[inst]); gpu_metrics->current_dclk0[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[inst]); } } } - gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency)); + gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag)); + + /* Total accumulated cycle counter */ + gpu_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, flag); - /* Throttle status is not reported through metrics now */ - gpu_metrics->throttle_status = 0; + /* Accumulated throttler residencies */ + gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, flag); + gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, flag); + gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, flag); + gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, flag); + gpu_metrics->hbm_thm_residency_acc = GET_METRIC_FIELD(HbmThmResidencyAcc, flag); /* Clock Lock Status. 
Each bit corresponds to each GFXCLK instance */ - gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0); + gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak, flag) >> GET_INST(GC, 0); if (!(adev->flags & AMD_IS_APU)) { /*Check smu version, PCIE link speed and width will be reported from pmfw metric @@ -2399,41 +2429,80 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table metrics_x->PCIeNAKSentCountAcc; gpu_metrics->pcie_nak_rcvd_count_acc = metrics_x->PCIeNAKReceivedCountAcc; + if (smu_v13_0_6_is_other_end_count_available(smu)) + gpu_metrics->pcie_lc_perf_other_end_recovery = + metrics_x->PCIeOtherEndRecoveryAcc; + } gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); gpu_metrics->gfx_activity_acc = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, flag)); gpu_metrics->mem_activity_acc = - SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc)); + SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, flag)); for (i = 0; i < NUM_XGMI_LINKS; i++) { gpu_metrics->xgmi_read_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, flag)[i]); gpu_metrics->xgmi_write_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]); - } + SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, flag)[i]); + ret = amdgpu_get_xgmi_link_status(adev, i); + if (ret >= 0) + gpu_metrics->xgmi_link_status[i] = ret; + } + + gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; + + apu_per_inst = (adev->flags & AMD_IS_APU) && (smu->smc_fw_version >= 0x04556A00); + smu_13_0_6_per_inst = !(adev->flags & AMD_IS_APU) && + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) + == IP_VERSION(13, 0, 6)) && + (smu->smc_fw_version >= 0x556F00); + smu_13_0_14_per_inst = !(adev->flags & AMD_IS_APU) && + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) + == IP_VERSION(13, 0, 14)) && + (smu->smc_fw_version >= 0x05550B00); + + per_inst = apu_per_inst || smu_13_0_6_per_inst || smu_13_0_14_per_inst; + + for_each_xcp(adev->xcp_mgr, xcp, i) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN has same instances */ + inst = GET_INST(VCN, k); + + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + gpu_metrics->xcp_stats[i].jpeg_busy + [(idx * adev->jpeg.num_jpeg_rings) + j] = + SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, flag) + [(inst * adev->jpeg.num_jpeg_rings) + j]); + } + gpu_metrics->xcp_stats[i].vcn_busy[idx] = + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, flag)[inst]); + idx++; - for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - inst = GET_INST(JPEG, i); - for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { - gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] = - SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy) - [(inst * adev->jpeg.num_jpeg_rings) + j]); } - } - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - inst = GET_INST(VCN, i); - gpu_metrics->vcn_activity[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]); + if (per_inst) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = GET_INST(GC, k); + gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = + SMUQ10_ROUND(metrics_x->GfxBusy[inst]); + gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = + SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]); + idx++; + } + } } - gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth)); - 
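Two conventions recur throughout the smu_v13_0_6 metrics code above and are easy to miss in the flattened diff: GET_METRIC_FIELD(field, flag) now takes a precomputed bool (from smu_v13_0_6_is_unified_metrics(), which folds in a firmware-version check) instead of re-testing adev->flags on every access, and the raw counters are Q10 fixed point. A small self-contained sketch of the rounding; smuq10_round() is an illustrative name, but the arithmetic is copied from the SMUQ10_* macros in this patch:

    #include <stdint.h>

    /* Mirrors SMUQ10_TO_UINT()/SMUQ10_FRAC()/SMUQ10_ROUND(): 10 fractional
     * bits, round-to-nearest with ties rounding up (0x200 is one half). */
    static uint32_t smuq10_round(uint64_t q10)
    {
            uint32_t integer = q10 >> 10;
            uint32_t frac = q10 & 0x3ff;

            return integer + (frac >= 0x200);
    }

    /* Example: 22.5 is 0x5a00 in Q10 and rounds to 23; 0x59ff rounds to 22. */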
gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate)); + gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth, flag)); + gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate, flag)); - gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp); + gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, flag); *table = (void *)gpu_metrics; kfree(metrics_x); @@ -2974,6 +3043,16 @@ static int mmhub_err_codes[] = { CODE_VML2, CODE_VML2_WALKER, CODE_MMCANE, }; +static int vcn_err_codes[] = { + CODE_VIDD, CODE_VIDV, +}; +static int jpeg_err_codes[] = { + CODE_JPEG0S, CODE_JPEG0D, CODE_JPEG1S, CODE_JPEG1D, + CODE_JPEG2S, CODE_JPEG2D, CODE_JPEG3S, CODE_JPEG3D, + CODE_JPEG4S, CODE_JPEG4D, CODE_JPEG5S, CODE_JPEG5D, + CODE_JPEG6S, CODE_JPEG6D, CODE_JPEG7S, CODE_JPEG7D, +}; + static const struct mca_ras_info mca_ras_table[] = { { .blkid = AMDGPU_RAS_BLOCK__UMC, @@ -3002,6 +3081,20 @@ static const struct mca_ras_info mca_ras_table[] = { .blkid = AMDGPU_RAS_BLOCK__XGMI_WAFL, .ip = AMDGPU_MCA_IP_PCS_XGMI, .get_err_count = mca_pcs_xgmi_mca_get_err_count, + }, { + .blkid = AMDGPU_RAS_BLOCK__VCN, + .ip = AMDGPU_MCA_IP_SMU, + .err_code_array = vcn_err_codes, + .err_code_count = ARRAY_SIZE(vcn_err_codes), + .get_err_count = mca_smu_mca_get_err_count, + .bank_is_valid = mca_smu_bank_is_valid, + }, { + .blkid = AMDGPU_RAS_BLOCK__JPEG, + .ip = AMDGPU_MCA_IP_SMU, + .err_code_array = jpeg_err_codes, + .err_code_count = ARRAY_SIZE(jpeg_err_codes), + .get_err_count = mca_smu_mca_get_err_count, + .bank_is_valid = mca_smu_bank_is_valid, }, }; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index b891a5e0a396..f4ac403b8b36 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -83,6 +83,8 @@ #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 +#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11 +#define PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP 12 #define LINK_SPEED_MAX 3 @@ -734,19 +736,6 @@ static bool smu_v13_0_7_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static void smu_v13_0_7_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - SkuTable_t *skutable = &pptable->SkuTable; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); -} - static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics) { uint32_t throttler_status = 0; @@ -1132,6 +1121,14 @@ static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu, od_min_setting = overdrive_lowerlimits->FanMinimumPwm; od_max_setting = overdrive_upperlimits->FanMinimumPwm; break; + case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE: + od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable; + od_max_setting = overdrive_upperlimits->FanZeroRpmEnable; + break; + case PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP: + od_min_setting = overdrive_lowerlimits->FanZeroRpmStopTemp; + od_max_setting = overdrive_upperlimits->FanZeroRpmStopTemp; + break; default: od_min_setting = od_max_setting = INT_MAX; break; @@ -1452,6 
+1449,42 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, min_value, max_value); break; + case SMU_OD_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_7_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmEnable); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_7_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_STOP_TEMPERATURE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmStopTemp); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_STOP_TEMPERATURE: %u %u\n", + min_value, max_value); + break; + case SMU_OD_RANGE: if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && @@ -1548,6 +1581,16 @@ static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long inp od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + od_table->OverDriveTable.FanZeroRpmEnable = + boot_overdrive_table->OverDriveTable.FanZeroRpmEnable; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + od_table->OverDriveTable.FanZeroRpmStopTemp = + boot_overdrive_table->OverDriveTable.FanZeroRpmStopTemp; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; default: dev_info(adev->dev, "Invalid table index: %ld\n", input); return -EINVAL; @@ -1841,6 +1884,48 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmEnable = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM stop temperature setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmStopTemp = 
input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_RESTORE_DEFAULT_TABLE: if (size == 1) { ret = smu_v13_0_7_od_restore_table_single(smu, input[0]); @@ -1964,7 +2049,8 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, clk_type, min_freq, - max_freq); + max_freq, + false); break; case SMU_DCEFCLK: case SMU_PCIE: @@ -2061,6 +2147,8 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK]; + gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; + gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0]; gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1]; @@ -2106,7 +2194,11 @@ static void smu_v13_0_7_set_supported_od_feature_mask(struct smu_context *smu) OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | - OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET; } static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu) @@ -2172,6 +2264,10 @@ static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu) user_od_table_bak.OverDriveTable.FanTargetTemperature; user_od_table->OverDriveTable.FanMinimumPwm = user_od_table_bak.OverDriveTable.FanMinimumPwm; + user_od_table->OverDriveTable.FanZeroRpmEnable = + user_od_table_bak.OverDriveTable.FanZeroRpmEnable; + user_od_table->OverDriveTable.FanZeroRpmStopTemp = + user_od_table_bak.OverDriveTable.FanZeroRpmStopTemp; } smu_v13_0_7_set_supported_od_feature_mask(smu); @@ -2434,78 +2530,110 @@ out: return result; } -static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +#define SMU_13_0_7_CUSTOM_PARAMS_COUNT 8 +#define SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT 2 +#define SMU_13_0_7_CUSTOM_PARAMS_SIZE (SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_7_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int smu_v13_0_7_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); - int workload_type, ret = 0; + int ret, idx; - smu->power_profile_mode = input[size]; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + idx = 0 * SMU_13_0_7_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor->Gfx_ActiveHystLimit = input[idx + 1]; + activity_monitor->Gfx_IdleHystLimit = input[idx + 2]; + activity_monitor->Gfx_FPS = input[idx + 3]; + activity_monitor->Gfx_MinActiveFreqType = input[idx + 4]; + activity_monitor->Gfx_BoosterFreqType = input[idx 
+ 5]; + activity_monitor->Gfx_MinActiveFreq = input[idx + 6]; + activity_monitor->Gfx_BoosterFreq = input[idx + 7]; + } + idx = 1 * SMU_13_0_7_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Fclk */ + activity_monitor->Fclk_ActiveHystLimit = input[idx + 1]; + activity_monitor->Fclk_IdleHystLimit = input[idx + 2]; + activity_monitor->Fclk_FPS = input[idx + 3]; + activity_monitor->Fclk_MinActiveFreqType = input[idx + 4]; + activity_monitor->Fclk_BoosterFreqType = input[idx + 5]; + activity_monitor->Fclk_MinActiveFreq = input[idx + 6]; + activity_monitor->Fclk_BoosterFreq = input[idx + 7]; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 8) - return -EINVAL; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } + return ret; +} - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor->Gfx_ActiveHystLimit = input[1]; - activity_monitor->Gfx_IdleHystLimit = input[2]; - activity_monitor->Gfx_FPS = input[3]; - activity_monitor->Gfx_MinActiveFreqType = input[4]; - activity_monitor->Gfx_BoosterFreqType = input[5]; - activity_monitor->Gfx_MinActiveFreq = input[6]; - activity_monitor->Gfx_BoosterFreq = input[7]; - break; - case 1: /* Fclk */ - activity_monitor->Fclk_ActiveHystLimit = input[1]; - activity_monitor->Fclk_IdleHystLimit = input[2]; - activity_monitor->Fclk_FPS = input[3]; - activity_monitor->Fclk_MinActiveFreqType = input[4]; - activity_monitor->Fclk_BoosterFreqType = input[5]; - activity_monitor->Fclk_MinActiveFreq = input[6]; - activity_monitor->Fclk_BoosterFreq = input[7]; - break; - default: - return -EINVAL; +static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; + + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); + + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(SMU_13_0_7_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; } - - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), true); + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != SMU_13_0_7_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * SMU_13_0_7_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = smu_v13_0_7_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, SMU_13_0_7_CUSTOM_PARAMS_SIZE); } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type 
= smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); - if (workload_type < 0) - return -EINVAL; ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + backend_workload_mask, NULL); - if (ret) - dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); - else - smu->workload_mask = (1 << workload_type); + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } return ret; } @@ -2605,7 +2733,6 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, .is_dpm_running = smu_v13_0_7_is_dpm_running, - .dump_pptable = smu_v13_0_7_dump_pptable, .init_microcode = smu_v13_0_init_microcode, .load_microcode = smu_v13_0_load_microcode, .fini_microcode = smu_v13_0_fini_microcode, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 260c339f89c5..73b4506ef5a8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -220,7 +220,9 @@ static int yellow_carp_system_features_control(struct smu_context *smu, bool en) return ret; } -static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -945,9 +947,10 @@ failed: } static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { enum smu_message_type msg_set_min, msg_set_max; uint32_t min_clk = min; @@ -1134,7 +1137,7 @@ static int yellow_carp_force_clk_levels(struct smu_context *smu, if (ret) goto force_level_out; - ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) goto force_level_out; break; @@ -1254,9 +1257,10 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, if (sclk_min && sclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_SCLK, - sclk_min, - sclk_max); + SMU_SCLK, + sclk_min, + sclk_max, + false); if (ret) return ret; @@ -1266,18 +1270,20 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, if (fclk_min && fclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_FCLK, - fclk_min, - fclk_max); + SMU_FCLK, + fclk_min, + fclk_max, + false); if (ret) return ret; } if (socclk_min && socclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_SOCCLK, - socclk_min, - socclk_max); + SMU_SOCCLK, + socclk_min, + socclk_max, + false); if (ret) return ret; } @@ -1286,7 +1292,8 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, ret = yellow_carp_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + false); if (ret) return ret; } @@ -1295,7 +1302,8 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, ret = yellow_carp_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + false); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 865e916fc425..a87040cb2f2e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -49,7 +49,7 @@ #define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0 const int decoded_link_speed[5] = {1, 2, 3, 4, 5}; -const int decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16}; +const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32}; /* * DO NOT use these for err/warn/info/debug messages. * Use dev_err, dev_warn, dev_info and dev_dbg instead. @@ -1102,7 +1102,8 @@ failed: int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, - uint32_t max) + uint32_t max, + bool automatic) { int ret = 0, clk_id = 0; uint32_t param; @@ -1117,7 +1118,10 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0xffff); + else + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, param, NULL); if (ret) @@ -1125,7 +1129,10 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, } if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0); + else + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, param, NULL); if (ret) @@ -1202,6 +1209,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, uint32_t dclk_min = 0, dclk_max = 0; uint32_t fclk_min = 0, fclk_max = 0; int ret = 0, i; + bool auto_level = false; switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1233,6 +1241,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, dclk_max = dclk_table->max; fclk_min = fclk_table->min; fclk_max = fclk_table->max; + auto_level = true; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; @@ -1268,7 +1277,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, sclk_min, - sclk_max); + sclk_max, + auto_level); if (ret) return ret; @@ -1280,7 +1290,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, SMU_MCLK, mclk_min, - mclk_max); + mclk_max, + auto_level); if (ret) return ret; @@ -1292,7 +1303,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + auto_level); if (ret) return ret; @@ -1307,7 +1319,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, i ? SMU_VCLK1 : SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + auto_level); if (ret) return ret; } @@ -1322,7 +1335,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, i ? 
SMU_DCLK1 : SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + auto_level); if (ret) return ret; } @@ -1334,7 +1348,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_min, - fclk_max); + fclk_max, + auto_level); if (ret) return ret; @@ -1492,7 +1507,8 @@ int smu_v14_0_set_single_dpm_table(struct smu_context *smu, } int smu_v14_0_set_vcn_enable(struct smu_context *smu, - bool enable) + bool enable, + int inst) { struct amdgpu_device *adev = smu->adev; int i, ret = 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index 8798ebfcea83..84f9b007b59f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -1132,7 +1132,7 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu, static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, ret = 0, size = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; @@ -1168,7 +1168,8 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, break; for (i = 0; i < count; i++) { - ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) break; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 5899d01fa73d..6a565ce74d5b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -367,54 +367,6 @@ static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu) return 0; } -#ifndef atom_smc_dpm_info_table_14_0_0 -struct atom_smc_dpm_info_table_14_0_0 { - struct atom_common_table_header table_header; - BoardTable_t BoardTable; -}; -#endif - -static int smu_v14_0_2_append_powerplay_table(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *smc_pptable = table_context->driver_pptable; - struct atom_smc_dpm_info_table_14_0_0 *smc_dpm_table; - BoardTable_t *BoardTable = &smc_pptable->BoardTable; - int index, ret; - - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - smc_dpm_info); - - ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL, - (uint8_t **)&smc_dpm_table); - if (ret) - return ret; - - memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t)); - - return 0; -} - -#if 0 -static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, - void **table, - uint32_t *size) -{ - struct smu_table_context *smu_table = &smu->smu_table; - void *combo_pptable = smu_table->combo_pptable; - int ret = 0; - - ret = smu_cmn_get_combo_pptable(smu); - if (ret) - return ret; - - *table = combo_pptable; - *size = sizeof(struct smu_14_0_powerplay_table); - - return 0; -} -#endif - static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, void **table, uint32_t *size) @@ -436,16 +388,12 @@ static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, static int smu_v14_0_2_setup_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; - struct amdgpu_device *adev = smu->adev; int ret = 0; if (amdgpu_sriov_vf(smu->adev)) return 0; - if (!adev->scpm_enabled) - ret = 
smu_v14_0_setup_pptable(smu); - else - ret = smu_v14_0_2_get_pptable_from_pmfw(smu, + ret = smu_v14_0_2_get_pptable_from_pmfw(smu, &smu_table->power_play_table, &smu_table->power_play_table_size); if (ret) @@ -455,16 +403,6 @@ static int smu_v14_0_2_setup_pptable(struct smu_context *smu) if (ret) return ret; - /* - * With SCPM enabled, the operation below will be handled - * by PSP. Driver involvment is unnecessary and useless. - */ - if (!adev->scpm_enabled) { - ret = smu_v14_0_2_append_powerplay_table(smu); - if (ret) - return ret; - } - ret = smu_v14_0_2_check_powerplay_table(smu); if (ret) return ret; @@ -732,19 +670,6 @@ static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static void smu_v14_0_2_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - PFE_Settings_t *PFEsettings = &pptable->PFE_Settings; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", PFEsettings->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", PFEsettings->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", PFEsettings->FeaturesToRun[1]); -} - static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics) { uint32_t throttler_status = 0; @@ -1077,12 +1002,9 @@ static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu, switch (od_feature_bit) { case PP_OD_FEATURE_GFXCLK_FMIN: - od_min_setting = overdrive_lowerlimits->GfxclkFmin; - od_max_setting = overdrive_upperlimits->GfxclkFmin; - break; case PP_OD_FEATURE_GFXCLK_FMAX: - od_min_setting = overdrive_lowerlimits->GfxclkFmax; - od_max_setting = overdrive_upperlimits->GfxclkFmax; + od_min_setting = overdrive_lowerlimits->GfxclkFoffset; + od_max_setting = overdrive_upperlimits->GfxclkFoffset; break; case PP_OD_FEATURE_UCLK_FMIN: od_min_setting = overdrive_lowerlimits->UclkFmin; @@ -1251,13 +1173,15 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," : (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," : (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," : - (pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "", + (pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : + (pcie_table->pcie_gen[i] == 4) ? "32.0GT/s," : "", (pcie_table->pcie_lane[i] == 1) ? "x1" : (pcie_table->pcie_lane[i] == 2) ? "x2" : (pcie_table->pcie_lane[i] == 3) ? "x4" : (pcie_table->pcie_lane[i] == 4) ? "x8" : (pcie_table->pcie_lane[i] == 5) ? "x12" : - (pcie_table->pcie_lane[i] == 6) ? "x16" : "", + (pcie_table->pcie_lane[i] == 6) ? "x16" : + (pcie_table->pcie_lane[i] == 7) ? "x32" : "", pcie_table->clk_freq[i], (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) && (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ? 
@@ -1269,10 +1193,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, PP_OD_FEATURE_GFXCLK_BIT)) break; - size += sysfs_emit_at(buf, size, "OD_SCLK:\n"); - size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", - od_table->OverDriveTable.GfxclkFmin, - od_table->OverDriveTable.GfxclkFmax); + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + const OverDriveLimits_t * const overdrive_lowerlimits = + &pptable->SkuTable.OverDriveLimitsBasicMin; + + size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n"); + size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n", + overdrive_lowerlimits->GfxclkFoffset, + overdrive_upperlimits->GfxclkFoffset); break; case SMU_OD_MCLK: @@ -1414,7 +1344,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, PP_OD_FEATURE_GFXCLK_FMAX, NULL, &max_value); - size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n", min_value, max_value); } @@ -1516,7 +1446,8 @@ static int smu_v14_0_2_force_clk_levels(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, clk_type, min_freq, - max_freq); + max_freq, + false); break; case SMU_DCEFCLK: case SMU_PCIE: @@ -1534,15 +1465,35 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_14_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table; + int num_of_levels = pcie_table->num_of_link_levels; uint32_t smu_pcie_arg; int ret, i; - for (i = 0; i < pcie_table->num_of_link_levels; i++) { - if (pcie_table->pcie_gen[i] > pcie_gen_cap) + if (!num_of_levels) + return 0; + + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) + pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; + + if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap) + pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1]; + + /* Force all levels to use the same settings */ + for (i = 0; i < num_of_levels; i++) { pcie_table->pcie_gen[i] = pcie_gen_cap; - if (pcie_table->pcie_lane[i] > pcie_width_cap) pcie_table->pcie_lane[i] = pcie_width_cap; + } + } else { + for (i = 0; i < num_of_levels; i++) { + if (pcie_table->pcie_gen[i] > pcie_gen_cap) + pcie_table->pcie_gen[i] = pcie_gen_cap; + if (pcie_table->pcie_lane[i] > pcie_width_cap) + pcie_table->pcie_lane[i] = pcie_width_cap; + } + } + for (i = 0; i < num_of_levels; i++) { smu_pcie_arg = i << 16; smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; smu_pcie_arg |= pcie_table->pcie_lane[i]; @@ -1788,85 +1739,120 @@ static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu, return size; } -static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, - long *input, - uint32_t size) +#define SMU_14_0_2_CUSTOM_PARAMS_COUNT 9 +#define SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT 2 +#define SMU_14_0_2_CUSTOM_PARAMS_SIZE (SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT * SMU_14_0_2_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); - int workload_type, ret = 0; + int ret, idx; - smu->power_profile_mode = input[size]; + ret = smu_cmn_update_table(smu, + 
SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } - if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + idx = 0 * SMU_14_0_2_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor->Gfx_FPS = input[idx + 1]; + activity_monitor->Gfx_MinActiveFreqType = input[idx + 2]; + activity_monitor->Gfx_MinActiveFreq = input[idx + 3]; + activity_monitor->Gfx_BoosterFreqType = input[idx + 4]; + activity_monitor->Gfx_BoosterFreq = input[idx + 5]; + activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6]; + activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7]; + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8]; + } + idx = 1 * SMU_14_0_2_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Fclk */ + activity_monitor->Fclk_FPS = input[idx + 1]; + activity_monitor->Fclk_MinActiveFreqType = input[idx + 2]; + activity_monitor->Fclk_MinActiveFreq = input[idx + 3]; + activity_monitor->Fclk_BoosterFreqType = input[idx + 4]; + activity_monitor->Fclk_BoosterFreq = input[idx + 5]; + activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6]; + activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7]; + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8]; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 9) - return -EINVAL; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), - false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } + return ret; +} - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor->Gfx_FPS = input[1]; - activity_monitor->Gfx_MinActiveFreqType = input[2]; - activity_monitor->Gfx_MinActiveFreq = input[3]; - activity_monitor->Gfx_BoosterFreqType = input[4]; - activity_monitor->Gfx_BoosterFreq = input[5]; - activity_monitor->Gfx_PD_Data_limit_c = input[6]; - activity_monitor->Gfx_PD_Data_error_coeff = input[7]; - activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; - break; - case 1: /* Fclk */ - activity_monitor->Fclk_FPS = input[1]; - activity_monitor->Fclk_MinActiveFreqType = input[2]; - activity_monitor->Fclk_MinActiveFreq = input[3]; - activity_monitor->Fclk_BoosterFreqType = input[4]; - activity_monitor->Fclk_BoosterFreq = input[5]; - activity_monitor->Fclk_PD_Data_limit_c = input[6]; - activity_monitor->Fclk_PD_Data_error_coeff = input[7]; - activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; - break; - default: - return -EINVAL; - } +static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), - true); + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); + + /* disable deep sleep if compute is enabled */ + if 
(workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE)) + smu_v14_0_deep_sleep_control(smu, false); + else + smu_v14_0_deep_sleep_control(smu, true); + + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(SMU_14_0_2_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; + } + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != SMU_14_0_2_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * SMU_14_0_2_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = smu_v14_0_2_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, SMU_14_0_2_CUSTOM_PARAMS_SIZE); } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); - if (workload_type < 0) - return -EINVAL; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - 1 << workload_type, - NULL); - if (!ret) - smu->workload_mask = 1 << workload_type; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + backend_workload_mask, NULL); + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } return ret; } @@ -2158,7 +2144,7 @@ static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity; gpu_metrics->average_umc_activity = metrics->AverageUclkActivity; - gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage, + gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage, metrics->Vcn1ActivityPercentage); gpu_metrics->average_socket_power = metrics->AverageSocketPower; @@ -2217,8 +2203,7 @@ static void smu_v14_0_2_dump_od_table(struct smu_context *smu, { struct amdgpu_device *adev = smu->adev; - dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin, - od_table->OverDriveTable.GfxclkFmax); + dev_dbg(adev->dev, "OD: Gfxclk offset: (%d)\n", od_table->OverDriveTable.GfxclkFoffset); dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin, od_table->OverDriveTable.UclkFmax); } @@ -2309,10 +2294,8 @@ static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu) memcpy(user_od_table, boot_od_table, sizeof(OverDriveTableExternal_t)); - user_od_table->OverDriveTable.GfxclkFmin = - user_od_table_bak.OverDriveTable.GfxclkFmin; - user_od_table->OverDriveTable.GfxclkFmax = - user_od_table_bak.OverDriveTable.GfxclkFmax; + user_od_table->OverDriveTable.GfxclkFoffset = + user_od_table_bak.OverDriveTable.GfxclkFoffset; user_od_table->OverDriveTable.UclkFmin = user_od_table_bak.OverDriveTable.UclkFmin; user_od_table->OverDriveTable.UclkFmax = @@ -2441,22 +2424,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, } switch (input[i]) { - case 0: - smu_v14_0_2_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_FMIN, - &minimum, - &maximum); - if 
(input[i + 1] < minimum || - input[i + 1] > maximum) { - dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n", - input[i + 1], minimum, maximum); - return -EINVAL; - } - - od_table->OverDriveTable.GfxclkFmin = input[i + 1]; - od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; - break; - case 1: smu_v14_0_2_get_od_setting_limits(smu, PP_OD_FEATURE_GFXCLK_FMAX, @@ -2469,7 +2436,7 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, return -EINVAL; } - od_table->OverDriveTable.GfxclkFmax = input[i + 1]; + od_table->OverDriveTable.GfxclkFoffset = input[i + 1]; od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; break; @@ -2480,13 +2447,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, } } - if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) { - dev_err(adev->dev, - "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n", - (uint32_t)od_table->OverDriveTable.GfxclkFmin, - (uint32_t)od_table->OverDriveTable.GfxclkFmax); - return -EINVAL; - } break; case PP_OD_EDIT_MCLK_VDDC_TABLE: @@ -2806,7 +2766,6 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .i2c_init = smu_v14_0_2_i2c_control_init, .i2c_fini = smu_v14_0_2_i2c_control_fini, .is_dpm_running = smu_v14_0_2_is_dpm_running, - .dump_pptable = smu_v14_0_2_dump_pptable, .init_microcode = smu_v14_0_init_microcode, .load_microcode = smu_v14_0_load_microcode, .fini_microcode = smu_v14_0_fini_microcode, @@ -2817,7 +2776,6 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .check_fw_status = smu_v14_0_check_fw_status, .setup_pptable = smu_v14_0_2_setup_pptable, .check_fw_version = smu_v14_0_check_fw_version, - .write_pptable = smu_cmn_write_pptable, .set_driver_table_location = smu_v14_0_set_driver_table_location, .system_features_control = smu_v14_0_system_features_control, .set_allowed_mask = smu_v14_0_set_allowed_mask, @@ -2848,7 +2806,6 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .get_unique_id = smu_v14_0_2_get_unique_id, .get_power_limit = smu_v14_0_2_get_power_limit, .set_power_limit = smu_v14_0_2_set_power_limit, - .set_power_source = smu_v14_0_set_power_source, .get_power_profile_mode = smu_v14_0_2_get_power_profile_mode, .set_power_profile_mode = smu_v14_0_2_set_power_profile_mode, .run_btc = smu_v14_0_run_btc, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 91ad434bcdae..9f55207ea9bc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -1078,6 +1078,12 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) case METRICS_VERSION(1, 5): structure_size = sizeof(struct gpu_metrics_v1_5); break; + case METRICS_VERSION(1, 6): + structure_size = sizeof(struct gpu_metrics_v1_6); + break; + case METRICS_VERSION(1, 7): + structure_size = sizeof(struct gpu_metrics_v1_7); + break; case METRICS_VERSION(2, 0): structure_size = sizeof(struct gpu_metrics_v2_0); break; @@ -1215,3 +1221,28 @@ void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy) { policy->desc = &xgmi_plpd_policy_desc; } + +void smu_cmn_get_backend_workload_mask(struct smu_context *smu, + u32 workload_mask, + u32 *backend_workload_mask) +{ + int workload_type; + u32 profile_mode; + + *backend_workload_mask = 0; + + for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) { + if (!(workload_mask & (1 << profile_mode))) + continue; + + /* conv 
PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + profile_mode); + + if (workload_type < 0) + continue; + + *backend_workload_mask |= 1 << workload_type; + } +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 1de685defe85..a020277dec3e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -147,5 +147,9 @@ bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev); void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy); void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy); +void smu_cmn_get_backend_workload_mask(struct smu_context *smu, + u32 workload_mask, + u32 *backend_workload_mask); + #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h index 6f4d212607d7..c09ecf1a68a0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h @@ -78,7 +78,6 @@ #define smu_register_irq_handler(smu) smu_ppt_funcs(register_irq_handler, 0, smu) #define smu_get_dpm_ultimate_freq(smu, param, min, max) smu_ppt_funcs(get_dpm_ultimate_freq, 0, smu, param, min, max) #define smu_asic_set_performance_level(smu, level) smu_ppt_funcs(set_performance_level, -EINVAL, smu, level) -#define smu_dump_pptable(smu) smu_ppt_funcs(dump_pptable, 0, smu) #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap) #define smu_set_power_source(smu, power_src) smu_ppt_funcs(set_power_source, 0, smu, power_src) #define smu_i2c_init(smu) smu_ppt_funcs(i2c_init, 0, smu) diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c index 6d475bb34002..d981d721e796 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c @@ -153,7 +153,7 @@ static const struct dev_pm_ops komeda_pm_ops = { static struct platform_driver komeda_platform_driver = { .probe = komeda_platform_probe, - .remove_new = komeda_platform_remove, + .remove = komeda_platform_remove, .shutdown = komeda_platform_shutdown, .driver = { .name = "komeda", diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index f5d358f3893b..191b806624df 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -9,6 +9,7 @@ * ARM HDLCD Driver */ +#include <linux/aperture.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/clk.h> @@ -21,7 +22,6 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> #include <drm/drm_crtc.h> @@ -287,7 +287,7 @@ static int hdlcd_drm_bind(struct device *dev) */ if (hdlcd_read(hdlcd, HDLCD_REG_COMMAND)) { hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); - drm_aperture_remove_framebuffers(&hdlcd_driver); + aperture_remove_all_conflicting_devices(hdlcd_driver.name); } drm_mode_config_reset(drm); @@ -405,7 +405,7 @@ static SIMPLE_DEV_PM_OPS(hdlcd_pm_ops, hdlcd_pm_suspend, hdlcd_pm_resume); static struct platform_driver hdlcd_platform_driver = { .probe = hdlcd_probe, - .remove_new = hdlcd_remove, + .remove = hdlcd_remove, .shutdown = hdlcd_shutdown, .driver = { .name = "hdlcd", diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 
4cb25004b84f..fd2be80f3bf5 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -988,7 +988,7 @@ static const struct dev_pm_ops malidp_pm_ops = { static struct platform_driver malidp_platform_driver = { .probe = malidp_platform_probe, - .remove_new = malidp_platform_remove, + .remove = malidp_platform_remove, .shutdown = malidp_platform_shutdown, .driver = { .name = "mali-dp", diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index c78687c755a8..0900e4466ffb 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -1084,7 +1084,7 @@ MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids); struct platform_driver armada_lcd_platform_driver = { .probe = armada_lcd_probe, - .remove_new = armada_lcd_remove, + .remove = armada_lcd_remove, .driver = { .name = "armada-lcd", .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 3dc5f0499e4c..650e450cc19b 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -3,6 +3,7 @@ * Copyright (C) 2012 Russell King */ +#include <linux/aperture.h> #include <linux/clk.h> #include <linux/component.h> #include <linux/module.h> @@ -10,7 +11,6 @@ #include <linux/of_graph.h> #include <linux/platform_device.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> @@ -93,7 +93,7 @@ static int armada_drm_bind(struct device *dev) } /* Remove early framebuffers */ - ret = drm_aperture_remove_framebuffers(&armada_drm_driver); + ret = aperture_remove_all_conflicting_devices(armada_drm_driver.name); if (ret) { dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n", __func__, ret); @@ -250,7 +250,7 @@ MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids); static struct platform_driver armada_drm_platform_driver = { .probe = armada_drm_probe, - .remove_new = armada_drm_remove, + .remove = armada_drm_remove, .shutdown = armada_drm_shutdown, .driver = { .name = "armada-drm", diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 26d10065d534..1a1680d71486 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -15,7 +15,7 @@ #include "armada_gem.h" #include "armada_ioctlP.h" -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf) { diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 109023815fa2..b7e608ba6194 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -368,7 +368,7 @@ static void aspeed_gfx_shutdown(struct platform_device *pdev) static struct platform_driver aspeed_gfx_platform_driver = { .probe = aspeed_gfx_probe, - .remove_new = aspeed_gfx_remove, + .remove = aspeed_gfx_remove, .shutdown = aspeed_gfx_shutdown, .driver = { .name = "aspeed_gfx", diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 898c8b93c0b1..4afe4be072ef 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -26,11 +26,11 @@ * Authors: Dave Airlie <airlied@redhat.com> */ +#include <linux/aperture.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> @@ -281,7 +281,7 @@ static int 
ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct drm_device *drm; bool need_post = false; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, ast_driver.name); if (ret) return ret; diff --git a/drivers/gpu/drm/ast/ast_sil164.c b/drivers/gpu/drm/ast/ast_sil164.c index 6a72268d2314..be01254dd48a 100644 --- a/drivers/gpu/drm/ast/ast_sil164.c +++ b/drivers/gpu/drm/ast/ast_sil164.c @@ -29,6 +29,8 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector if (ast_connector->physical_status == connector_status_connected) { count = drm_connector_helper_get_modes(connector); } else { + drm_edid_connector_update(connector, NULL); + /* * There's no EDID data without a connected monitor. Set BMC- * compatible modes in this case. The XGA default resolution diff --git a/drivers/gpu/drm/ast/ast_vga.c b/drivers/gpu/drm/ast/ast_vga.c index 5c79b773af57..abe0fff8485c 100644 --- a/drivers/gpu/drm/ast/ast_vga.c +++ b/drivers/gpu/drm/ast/ast_vga.c @@ -29,6 +29,8 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) if (ast_connector->physical_status == connector_status_connected) { count = drm_connector_helper_get_modes(connector); } else { + drm_edid_connector_update(connector, NULL); + /* * There's no EDID data without a connected monitor. Set BMC- * compatible modes in this case. The XGA default resolution diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 792dcc19e8e7..7b209af7cf45 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -937,7 +937,7 @@ static const struct of_device_id atmel_hlcdc_dc_of_match[] = { static struct platform_driver atmel_hlcdc_dc_platform_driver = { .probe = atmel_hlcdc_dc_drm_probe, - .remove_new = atmel_hlcdc_dc_drm_remove, + .remove = atmel_hlcdc_dc_drm_remove, .shutdown = atmel_hlcdc_dc_drm_shutdown, .driver = { .name = "atmel-hlcdc-display-controller", diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 683cb33805b2..6b4664d91faa 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -90,6 +90,17 @@ config DRM_FSL_LDB help Support for i.MX8MP DPI-to-LVDS on-SoC encoder. +config DRM_ITE_IT6263 + tristate "ITE IT6263 LVDS/HDMI bridge" + depends on OF + select DRM_DISPLAY_HDMI_STATE_HELPER + select DRM_DISPLAY_HELPER + select DRM_BRIDGE_CONNECTOR + select DRM_KMS_HELPER + select REGMAP_I2C + help + ITE IT6263 LVDS to HDMI bridge chip driver. 
+ config DRM_ITE_IT6505 tristate "ITE IT6505 DisplayPort bridge" depends on OF @@ -140,6 +151,8 @@ config DRM_LONTIUM_LT9611 select DRM_PANEL_BRIDGE select DRM_KMS_HELPER select DRM_MIPI_DSI + select DRM_DISPLAY_HELPER + select DRM_DISPLAY_HDMI_STATE_HELPER select REGMAP_I2C help Driver for Lontium LT9611 DSI to HDMI bridge diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 3daf803ce80b..97304b429a53 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o obj-$(CONFIG_DRM_FSL_LDB) += fsl-ldb.o +obj-$(CONFIG_DRM_ITE_IT6263) += ite-it6263.o obj-$(CONFIG_DRM_ITE_IT6505) += ite-it6505.o obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o obj-$(CONFIG_DRM_LONTIUM_LT9211) += lontium-lt9211.o diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c index b29980f95379..015983c015e5 100644 --- a/drivers/gpu/drm/bridge/aux-bridge.c +++ b/drivers/gpu/drm/bridge/aux-bridge.c @@ -58,9 +58,10 @@ int drm_aux_bridge_register(struct device *parent) adev->id = ret; adev->name = "aux_bridge"; adev->dev.parent = parent; - adev->dev.of_node = of_node_get(parent->of_node); adev->dev.release = drm_aux_bridge_release; + device_set_of_node_from_dev(&adev->dev, parent); + ret = auxiliary_device_init(adev); if (ret) { ida_free(&drm_aux_bridge_ida, adev->id); @@ -120,6 +121,10 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev, data->bridge.funcs = &drm_aux_bridge_funcs; data->bridge.of_node = data->dev->of_node; + /* passthrough data, allow everything */ + data->bridge.interlace_allowed = true; + data->bridge.ycbcr_420_allowed = true; + return devm_drm_bridge_add(data->dev, &data->bridge); } diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c index 6886db2d9e00..48f297c78ee6 100644 --- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c +++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c @@ -180,6 +180,10 @@ static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev, data->bridge.ops = DRM_BRIDGE_OP_HPD; data->bridge.type = id->driver_data; + /* passthrough data, allow everything */ + data->bridge.interlace_allowed = true; + data->bridge.ycbcr_420_allowed = true; + auxiliary_set_drvdata(auxdev, data); return devm_drm_bridge_add(data->dev, &data->bridge); diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c index 7457d38622b0..c7a0247e06ad 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c @@ -1300,7 +1300,7 @@ MODULE_DEVICE_TABLE(of, cdns_dsi_of_match); static struct platform_driver cdns_dsi_platform_driver = { .probe = cdns_dsi_drm_probe, - .remove_new = cdns_dsi_drm_remove, + .remove = cdns_dsi_drm_remove, .driver = { .name = "cdns-dsi", .of_match_table = cdns_dsi_of_match, diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index 41f72d458487..d081850e3c03 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -2656,7 +2656,7 @@ static struct platform_driver mhdp_driver = { .of_match_table = mhdp_ids, }, .probe = cdns_mhdp_probe, - .remove_new = cdns_mhdp_remove, + .remove = cdns_mhdp_remove, }; module_platform_driver(mhdp_driver); diff 
--git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index ab8e00baf3f1..72bc508d4e6e 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -270,6 +270,10 @@ static int display_connector_probe(struct platform_device *pdev) /* All the supported connector types support interlaced modes. */ conn->bridge.interlace_allowed = true; + if (type == DRM_MODE_CONNECTOR_HDMIA || + type == DRM_MODE_CONNECTOR_DisplayPort) + conn->bridge.ycbcr_420_allowed = true; + /* Get the optional connector label. */ of_property_read_string(pdev->dev.of_node, "label", &label); @@ -423,7 +427,7 @@ MODULE_DEVICE_TABLE(of, display_connector_match); static struct platform_driver display_connector_driver = { .probe = display_connector_probe, - .remove_new = display_connector_remove, + .remove = display_connector_remove, .driver = { .name = "display-connector", .of_match_table = display_connector_match, diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c index 0e4bac7dd04f..0fc8a14fd800 100644 --- a/drivers/gpu/drm/bridge/fsl-ldb.c +++ b/drivers/gpu/drm/bridge/fsl-ldb.c @@ -393,7 +393,7 @@ MODULE_DEVICE_TABLE(of, fsl_ldb_match); static struct platform_driver fsl_ldb_driver = { .probe = fsl_ldb_probe, - .remove_new = fsl_ldb_remove, + .remove = fsl_ldb_remove, .driver = { .name = "fsl-ldb", .of_match_table = fsl_ldb_match, diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c index 07a78a02a884..3ebf0b9866de 100644 --- a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c +++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c @@ -85,3 +85,4 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev, EXPORT_SYMBOL_GPL(devm_imx_drm_legacy_bridge); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Freescale i.MX DRM bridge driver for legacy DT bindings"); diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c index 073e64dc200c..0d1ac3edcab4 100644 --- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c @@ -193,7 +193,7 @@ MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pvi_match); static struct platform_driver imx8mp_hdmi_pvi_driver = { .probe = imx8mp_hdmi_pvi_probe, - .remove_new = imx8mp_hdmi_pvi_remove, + .remove = imx8mp_hdmi_pvi_remove, .driver = { .name = "imx-hdmi-pvi", .of_match_table = imx8mp_hdmi_pvi_match, diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c index 8fcc6d18f4ab..1e7a789ec289 100644 --- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c @@ -144,7 +144,7 @@ MODULE_DEVICE_TABLE(of, imx8mp_dw_hdmi_of_table); static struct platform_driver imx8mp_dw_hdmi_platform_driver = { .probe = imx8mp_dw_hdmi_probe, - .remove_new = imx8mp_dw_hdmi_remove, + .remove = imx8mp_dw_hdmi_remove, .driver = { .name = "imx8mp-dw-hdmi-tx", .of_match_table = imx8mp_dw_hdmi_of_table, diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c index c879e37f5811..dd5823f04c70 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c @@ -570,7 +570,7 @@ MODULE_DEVICE_TABLE(of, imx8qm_ldb_dt_ids); static struct platform_driver imx8qm_ldb_driver = { .probe = imx8qm_ldb_probe, - .remove_new = imx8qm_ldb_remove, + .remove = imx8qm_ldb_remove, .driver = { .pm = pm_ptr(&imx8qm_ldb_pm_ops), .name = DRIVER_NAME, 
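The repeated `- .remove_new` / `+ .remove` hunks across these platform drivers track the core change that made the remove callback of struct platform_driver return void: `.remove_new` was the transitional name for the void-returning callback, and these patches move drivers back to the plain `.remove` member now that it carries the new type. A minimal sketch of the resulting pattern, using a hypothetical `foo` driver (not part of this series):

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;	/* acquire resources here */
}

/*
 * The remove callback now returns void: any teardown failure has to be
 * handled (or logged) here rather than propagated to the driver core.
 */
static void foo_remove(struct platform_device *pdev)
{
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.remove = foo_remove,	/* previously spelled .remove_new */
	.driver = {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);

MODULE_DESCRIPTION("Hypothetical sketch of the void-returning .remove callback");
MODULE_LICENSE("GPL");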
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c index b33011f397f0..7bce2305d676 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c @@ -706,7 +706,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_ldb_dt_ids); static struct platform_driver imx8qxp_ldb_driver = { .probe = imx8qxp_ldb_probe, - .remove_new = imx8qxp_ldb_remove, + .remove = imx8qxp_ldb_remove, .driver = { .pm = pm_ptr(&imx8qxp_ldb_pm_ops), .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c index ce43e4069e21..1812bd106261 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c @@ -427,7 +427,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pc_dt_ids); static struct platform_driver imx8qxp_pc_bridge_driver = { .probe = imx8qxp_pc_bridge_probe, - .remove_new = imx8qxp_pc_bridge_remove, + .remove = imx8qxp_pc_bridge_remove, .driver = { .pm = pm_ptr(&imx8qxp_pc_pm_ops), .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c index 1d11cc1df43c..4b0715ed6f38 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c @@ -409,7 +409,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pixel_link_dt_ids); static struct platform_driver imx8qxp_pixel_link_bridge_driver = { .probe = imx8qxp_pixel_link_bridge_probe, - .remove_new = imx8qxp_pixel_link_bridge_remove, + .remove = imx8qxp_pixel_link_bridge_remove, .driver = { .of_match_table = imx8qxp_pixel_link_dt_ids, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c index fb7cf4369bb8..65cf3a6c8ec6 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c @@ -467,7 +467,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pxl2dpi_dt_ids); static struct platform_driver imx8qxp_pxl2dpi_bridge_driver = { .probe = imx8qxp_pxl2dpi_bridge_probe, - .remove_new = imx8qxp_pxl2dpi_bridge_remove, + .remove = imx8qxp_pxl2dpi_bridge_remove, .driver = { .of_match_table = imx8qxp_pxl2dpi_dt_ids, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c index 2347f8dd632f..bea8346515b8 100644 --- a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c @@ -904,7 +904,7 @@ MODULE_DEVICE_TABLE(of, imx93_dsi_dt_ids); static struct platform_driver imx93_dsi_driver = { .probe = imx93_dsi_probe, - .remove_new = imx93_dsi_remove, + .remove = imx93_dsi_remove, .driver = { .of_match_table = imx93_dsi_dt_ids, .name = "imx93_mipi_dsi", diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c new file mode 100644 index 000000000000..cbabd4e20d3e --- /dev/null +++ b/drivers/gpu/drm/bridge/ite-it6263.c @@ -0,0 +1,898 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2024 NXP + */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/hdmi.h> +#include <linux/i2c.h> +#include <linux/media-bus-format.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> + +#include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_state_helper.h> +#include <drm/drm_atomic.h> +#include 
<drm/drm_atomic_helper.h> +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> +#include <drm/drm_edid.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> + +/* ----------------------------------------------------------------------------- + * LVDS registers + */ + +/* LVDS software reset registers */ +#define LVDS_REG_05 0x05 +#define REG_SOFT_P_RST BIT(1) + +/* LVDS system configuration registers */ +/* 0x0b */ +#define LVDS_REG_0B 0x0b +#define REG_SSC_PCLK_RF BIT(0) +#define REG_LVDS_IN_SWAP BIT(1) + +/* LVDS test pattern gen control registers */ +/* 0x2c */ +#define LVDS_REG_2C 0x2c +#define REG_COL_DEP GENMASK(1, 0) +#define BIT8 FIELD_PREP(REG_COL_DEP, 1) +#define OUT_MAP BIT(4) +#define JEIDA 0 +#define REG_DESSC_ENB BIT(6) +#define DMODE BIT(7) +#define DISO BIT(7) +#define SISO 0 + +#define LVDS_REG_3C 0x3c +#define LVDS_REG_3F 0x3f +#define LVDS_REG_47 0x47 +#define LVDS_REG_48 0x48 +#define LVDS_REG_4F 0x4f +#define LVDS_REG_52 0x52 + +/* ----------------------------------------------------------------------------- + * HDMI registers are separated into three banks: + * 1) HDMI register common bank: 0x00 ~ 0x2f + */ + +/* HDMI general registers */ +#define HDMI_REG_SW_RST 0x04 +#define SOFTREF_RST BIT(5) +#define SOFTA_RST BIT(4) +#define SOFTV_RST BIT(3) +#define AUD_RST BIT(2) +#define HDCP_RST BIT(0) +#define HDMI_RST_ALL (SOFTREF_RST | SOFTA_RST | SOFTV_RST | \ + AUD_RST | HDCP_RST) + +#define HDMI_REG_SYS_STATUS 0x0e +#define HPDETECT BIT(6) +#define TXVIDSTABLE BIT(4) + +#define HDMI_REG_BANK_CTRL 0x0f +#define REG_BANK_SEL BIT(0) + +/* HDMI System DDC control registers */ +#define HDMI_REG_DDC_MASTER_CTRL 0x10 +#define MASTER_SEL_HOST BIT(0) + +#define HDMI_REG_DDC_HEADER 0x11 + +#define HDMI_REG_DDC_REQOFF 0x12 +#define HDMI_REG_DDC_REQCOUNT 0x13 +#define HDMI_REG_DDC_EDIDSEG 0x14 + +#define HDMI_REG_DDC_CMD 0x15 +#define DDC_CMD_EDID_READ 0x3 +#define DDC_CMD_FIFO_CLR 0x9 + +#define HDMI_REG_DDC_STATUS 0x16 +#define DDC_DONE BIT(7) +#define DDC_NOACK BIT(5) +#define DDC_WAITBUS BIT(4) +#define DDC_ARBILOSE BIT(3) +#define DDC_ERROR (DDC_NOACK | DDC_WAITBUS | DDC_ARBILOSE) + +#define HDMI_DDC_FIFO_BYTES 32 +#define HDMI_REG_DDC_READFIFO 0x17 +#define HDMI_REG_LVDS_PORT 0x1d /* LVDS input control I2C addr */ +#define HDMI_REG_LVDS_PORT_EN 0x1e +#define LVDS_INPUT_CTRL_I2C_ADDR 0x33 + +/* ----------------------------------------------------------------------------- + * 2) HDMI register bank0: 0x30 ~ 0xff + */ + +/* HDMI AFE registers */ +#define HDMI_REG_AFE_DRV_CTRL 0x61 +#define AFE_DRV_PWD BIT(5) +#define AFE_DRV_RST BIT(4) + +#define HDMI_REG_AFE_XP_CTRL 0x62 +#define AFE_XP_GAINBIT BIT(7) +#define AFE_XP_ER0 BIT(4) +#define AFE_XP_RESETB BIT(3) + +#define HDMI_REG_AFE_ISW_CTRL 0x63 + +#define HDMI_REG_AFE_IP_CTRL 0x64 +#define AFE_IP_GAINBIT BIT(7) +#define AFE_IP_ER0 BIT(3) +#define AFE_IP_RESETB BIT(2) + +/* HDMI input data format registers */ +#define HDMI_REG_INPUT_MODE 0x70 +#define IN_RGB 0x00 + +/* HDMI general control registers */ +#define HDMI_REG_HDMI_MODE 0xc0 +#define TX_HDMI_MODE BIT(0) + +#define HDMI_REG_GCP 0xc1 +#define AVMUTE BIT(0) +#define HDMI_COLOR_DEPTH GENMASK(6, 4) +#define HDMI_COLOR_DEPTH_24 FIELD_PREP(HDMI_COLOR_DEPTH, 4) + +#define HDMI_REG_PKT_GENERAL_CTRL 0xc6 +#define HDMI_REG_AVI_INFOFRM_CTRL 0xcd +#define ENABLE_PKT BIT(0) +#define REPEAT_PKT BIT(1) + +/*
----------------------------------------------------------------------------- + * 3) HDMI register bank1: 0x130 ~ 0x1ff (HDMI packet registers) + */ + +/* AVI packet registers */ +#define HDMI_REG_AVI_DB1 0x158 +#define HDMI_REG_AVI_DB2 0x159 +#define HDMI_REG_AVI_DB3 0x15a +#define HDMI_REG_AVI_DB4 0x15b +#define HDMI_REG_AVI_DB5 0x15c +#define HDMI_REG_AVI_CSUM 0x15d +#define HDMI_REG_AVI_DB6 0x15e +#define HDMI_REG_AVI_DB7 0x15f +#define HDMI_REG_AVI_DB8 0x160 +#define HDMI_REG_AVI_DB9 0x161 +#define HDMI_REG_AVI_DB10 0x162 +#define HDMI_REG_AVI_DB11 0x163 +#define HDMI_REG_AVI_DB12 0x164 +#define HDMI_REG_AVI_DB13 0x165 + +#define HDMI_AVI_DB_CHUNK1_SIZE (HDMI_REG_AVI_DB5 - HDMI_REG_AVI_DB1 + 1) +#define HDMI_AVI_DB_CHUNK2_SIZE (HDMI_REG_AVI_DB13 - HDMI_REG_AVI_DB6 + 1) + +/* IT6263 data sheet Rev0.8: LVDS RX supports input clock rate up to 150MHz. */ +#define MAX_PIXEL_CLOCK_KHZ 150000 + +/* IT6263 programming guide Ver0.90: PCLK_HIGH for TMDS clock over 80MHz. */ +#define HIGH_PIXEL_CLOCK_KHZ 80000 + +/* + * IT6263 data sheet Rev0.8: HDMI TX supports link speeds of up to 2.25Gbps + * (link clock rate of 225MHz). + */ +#define MAX_HDMI_TMDS_CHAR_RATE_HZ 225000000 + +struct it6263 { + struct device *dev; + struct i2c_client *hdmi_i2c; + struct i2c_client *lvds_i2c; + struct regmap *hdmi_regmap; + struct regmap *lvds_regmap; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + int lvds_data_mapping; + bool lvds_dual_link; + bool lvds_link12_swap; +}; + +static inline struct it6263 *bridge_to_it6263(struct drm_bridge *bridge) +{ + return container_of(bridge, struct it6263, bridge); +} + +static bool it6263_hdmi_writeable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case HDMI_REG_SW_RST: + case HDMI_REG_BANK_CTRL: + case HDMI_REG_DDC_MASTER_CTRL: + case HDMI_REG_DDC_HEADER: + case HDMI_REG_DDC_REQOFF: + case HDMI_REG_DDC_REQCOUNT: + case HDMI_REG_DDC_EDIDSEG: + case HDMI_REG_DDC_CMD: + case HDMI_REG_LVDS_PORT: + case HDMI_REG_LVDS_PORT_EN: + case HDMI_REG_AFE_DRV_CTRL: + case HDMI_REG_AFE_XP_CTRL: + case HDMI_REG_AFE_ISW_CTRL: + case HDMI_REG_AFE_IP_CTRL: + case HDMI_REG_INPUT_MODE: + case HDMI_REG_HDMI_MODE: + case HDMI_REG_GCP: + case HDMI_REG_PKT_GENERAL_CTRL: + case HDMI_REG_AVI_INFOFRM_CTRL: + case HDMI_REG_AVI_DB1: + case HDMI_REG_AVI_DB2: + case HDMI_REG_AVI_DB3: + case HDMI_REG_AVI_DB4: + case HDMI_REG_AVI_DB5: + case HDMI_REG_AVI_CSUM: + case HDMI_REG_AVI_DB6: + case HDMI_REG_AVI_DB7: + case HDMI_REG_AVI_DB8: + case HDMI_REG_AVI_DB9: + case HDMI_REG_AVI_DB10: + case HDMI_REG_AVI_DB11: + case HDMI_REG_AVI_DB12: + case HDMI_REG_AVI_DB13: + return true; + default: + return false; + } +} + +static bool it6263_hdmi_readable_reg(struct device *dev, unsigned int reg) +{ + if (it6263_hdmi_writeable_reg(dev, reg)) + return true; + + switch (reg) { + case HDMI_REG_SYS_STATUS: + case HDMI_REG_DDC_STATUS: + case HDMI_REG_DDC_READFIFO: + return true; + default: + return false; + } +} + +static bool it6263_hdmi_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case HDMI_REG_SW_RST: + case HDMI_REG_SYS_STATUS: + case HDMI_REG_DDC_STATUS: + case HDMI_REG_DDC_READFIFO: + return true; + default: + return false; + } +} + +static const struct regmap_range_cfg it6263_hdmi_range_cfg = { + .range_min = 0x00, + .range_max = HDMI_REG_AVI_DB13, + .selector_reg = HDMI_REG_BANK_CTRL, + .selector_mask = REG_BANK_SEL, + .selector_shift = 0, + .window_start = 0x00, + .window_len = 0x100, +}; + +static const struct regmap_config it6263_hdmi_regmap_config = { 
+ .name = "it6263-hdmi", + .reg_bits = 8, + .val_bits = 8, + .writeable_reg = it6263_hdmi_writeable_reg, + .readable_reg = it6263_hdmi_readable_reg, + .volatile_reg = it6263_hdmi_volatile_reg, + .max_register = HDMI_REG_AVI_DB13, + .ranges = &it6263_hdmi_range_cfg, + .num_ranges = 1, + .cache_type = REGCACHE_MAPLE, +}; + +static bool it6263_lvds_writeable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case LVDS_REG_05: + case LVDS_REG_0B: + case LVDS_REG_2C: + case LVDS_REG_3C: + case LVDS_REG_3F: + case LVDS_REG_47: + case LVDS_REG_48: + case LVDS_REG_4F: + case LVDS_REG_52: + return true; + default: + return false; + } +} + +static bool it6263_lvds_readable_reg(struct device *dev, unsigned int reg) +{ + return it6263_lvds_writeable_reg(dev, reg); +} + +static bool it6263_lvds_volatile_reg(struct device *dev, unsigned int reg) +{ + return reg == LVDS_REG_05; +} + +static const struct regmap_config it6263_lvds_regmap_config = { + .name = "it6263-lvds", + .reg_bits = 8, + .val_bits = 8, + .writeable_reg = it6263_lvds_writeable_reg, + .readable_reg = it6263_lvds_readable_reg, + .volatile_reg = it6263_lvds_volatile_reg, + .max_register = LVDS_REG_52, + .cache_type = REGCACHE_MAPLE, +}; + +static const char * const it6263_supplies[] = { + "ivdd", "ovdd", "txavcc18", "txavcc33", "pvcc1", "pvcc2", + "avcc", "anvdd", "apvdd" +}; + +static int it6263_parse_dt(struct it6263 *it) +{ + struct device *dev = it->dev; + struct device_node *port0, *port1; + int ret = 0; + + it->lvds_data_mapping = drm_of_lvds_get_data_mapping(dev->of_node); + if (it->lvds_data_mapping < 0) { + dev_err(dev, "%pOF: invalid or missing %s DT property: %d\n", + dev->of_node, "data-mapping", it->lvds_data_mapping); + return it->lvds_data_mapping; + } + + it->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 2, 0); + if (IS_ERR(it->next_bridge)) + return dev_err_probe(dev, PTR_ERR(it->next_bridge), + "failed to get next bridge\n"); + + port0 = of_graph_get_port_by_id(dev->of_node, 0); + port1 = of_graph_get_port_by_id(dev->of_node, 1); + if (port0 && port1) { + int order; + + it->lvds_dual_link = true; + order = drm_of_lvds_get_dual_link_pixel_order_sink(port0, port1); + if (order < 0) { + dev_err(dev, + "failed to get dual link pixel order: %d\n", + order); + ret = order; + } else if (order == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) { + it->lvds_link12_swap = true; + } + } else if (port1) { + ret = -EINVAL; + dev_err(dev, "single input LVDS port1 is not supported\n"); + } else if (!port0) { + ret = -EINVAL; + dev_err(dev, "no input LVDS port\n"); + } + + of_node_put(port0); + of_node_put(port1); + + return ret; +} + +static inline void it6263_hw_reset(struct gpio_desc *reset_gpio) +{ + if (!reset_gpio) + return; + + gpiod_set_value_cansleep(reset_gpio, 0); + fsleep(1000); + gpiod_set_value_cansleep(reset_gpio, 1); + /* The chip maker says the low pulse should be at least 40ms. 
*/ + fsleep(40000); + gpiod_set_value_cansleep(reset_gpio, 0); + /* additional time to wait for the high voltage to be stable */ + fsleep(5000); +} + +static inline int it6263_lvds_set_i2c_addr(struct it6263 *it) +{ + int ret; + + ret = regmap_write(it->hdmi_regmap, HDMI_REG_LVDS_PORT, + LVDS_INPUT_CTRL_I2C_ADDR << 1); + if (ret) + return ret; + + return regmap_write(it->hdmi_regmap, HDMI_REG_LVDS_PORT_EN, BIT(0)); +} + +static inline void it6263_lvds_reset(struct it6263 *it) +{ + /* AFE PLL reset */ + regmap_write_bits(it->lvds_regmap, LVDS_REG_3C, BIT(0), 0x0); + fsleep(1000); + regmap_write_bits(it->lvds_regmap, LVDS_REG_3C, BIT(0), BIT(0)); + + /* software pixel clock domain reset */ + regmap_write_bits(it->lvds_regmap, LVDS_REG_05, REG_SOFT_P_RST, + REG_SOFT_P_RST); + fsleep(1000); + regmap_write_bits(it->lvds_regmap, LVDS_REG_05, REG_SOFT_P_RST, 0x0); + fsleep(10000); +} + +static inline void it6263_lvds_set_interface(struct it6263 *it) +{ + /* color depth */ + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, REG_COL_DEP, BIT8); + /* output mapping */ + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, OUT_MAP, JEIDA); + + if (it->lvds_dual_link) { + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, DMODE, DISO); + regmap_write_bits(it->lvds_regmap, LVDS_REG_52, BIT(1), BIT(1)); + } else { + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, DMODE, SISO); + regmap_write_bits(it->lvds_regmap, LVDS_REG_52, BIT(1), 0); + } +} + +static inline void it6263_lvds_set_afe(struct it6263 *it) +{ + regmap_write(it->lvds_regmap, LVDS_REG_3C, 0xaa); + regmap_write(it->lvds_regmap, LVDS_REG_3F, 0x02); + regmap_write(it->lvds_regmap, LVDS_REG_47, 0xaa); + regmap_write(it->lvds_regmap, LVDS_REG_48, 0x02); + regmap_write(it->lvds_regmap, LVDS_REG_4F, 0x11); + + regmap_write_bits(it->lvds_regmap, LVDS_REG_0B, REG_SSC_PCLK_RF, + REG_SSC_PCLK_RF); + regmap_write_bits(it->lvds_regmap, LVDS_REG_3C, 0x07, 0); + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, REG_DESSC_ENB, + REG_DESSC_ENB); +} + +static inline void it6263_lvds_sys_cfg(struct it6263 *it) +{ + regmap_write_bits(it->lvds_regmap, LVDS_REG_0B, REG_LVDS_IN_SWAP, + it->lvds_link12_swap ?
REG_LVDS_IN_SWAP : 0); +} + +static inline void it6263_lvds_config(struct it6263 *it) +{ + it6263_lvds_reset(it); + it6263_lvds_set_interface(it); + it6263_lvds_set_afe(it); + it6263_lvds_sys_cfg(it); +} + +static inline void it6263_hdmi_config(struct it6263 *it) +{ + regmap_write(it->hdmi_regmap, HDMI_REG_SW_RST, HDMI_RST_ALL); + regmap_write(it->hdmi_regmap, HDMI_REG_INPUT_MODE, IN_RGB); + regmap_write_bits(it->hdmi_regmap, HDMI_REG_GCP, HDMI_COLOR_DEPTH, + HDMI_COLOR_DEPTH_24); +} + +static enum drm_connector_status it6263_detect(struct it6263 *it) +{ + unsigned int val; + + regmap_read(it->hdmi_regmap, HDMI_REG_SYS_STATUS, &val); + if (val & HPDETECT) + return connector_status_connected; + else + return connector_status_disconnected; +} + +static int it6263_read_edid(void *data, u8 *buf, unsigned int block, size_t len) +{ + struct it6263 *it = data; + struct regmap *regmap = it->hdmi_regmap; + unsigned int start = (block % 2) * EDID_LENGTH; + unsigned int segment = block >> 1; + unsigned int count, val; + int ret; + + regmap_write(regmap, HDMI_REG_DDC_MASTER_CTRL, MASTER_SEL_HOST); + regmap_write(regmap, HDMI_REG_DDC_HEADER, DDC_ADDR << 1); + regmap_write(regmap, HDMI_REG_DDC_EDIDSEG, segment); + + while (len) { + /* clear DDC FIFO */ + regmap_write(regmap, HDMI_REG_DDC_CMD, DDC_CMD_FIFO_CLR); + + ret = regmap_read_poll_timeout(regmap, HDMI_REG_DDC_STATUS, + val, val & DDC_DONE, + 2000, 10000); + if (ret) { + dev_err(it->dev, "failed to clear DDC FIFO:%d\n", ret); + return ret; + } + + count = len > HDMI_DDC_FIFO_BYTES ? HDMI_DDC_FIFO_BYTES : len; + + /* fire the read command */ + regmap_write(regmap, HDMI_REG_DDC_REQOFF, start); + regmap_write(regmap, HDMI_REG_DDC_REQCOUNT, count); + regmap_write(regmap, HDMI_REG_DDC_CMD, DDC_CMD_EDID_READ); + + start += count; + len -= count; + + ret = regmap_read_poll_timeout(regmap, HDMI_REG_DDC_STATUS, val, + val & (DDC_DONE | DDC_ERROR), + 20000, 250000); + if (ret && !(val & DDC_ERROR)) { + dev_err(it->dev, "failed to read EDID:%d\n", ret); + return ret; + } + + if (val & DDC_ERROR) { + dev_err(it->dev, "DDC error\n"); + return -EIO; + } + + /* cache to buffer */ + for (; count > 0; count--) { + regmap_read(regmap, HDMI_REG_DDC_READFIFO, &val); + *(buf++) = val; + } + } + + return 0; +} + +static int it6263_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + return drm_atomic_helper_connector_hdmi_check(conn_state->connector, + conn_state->state); +} + +static void +it6263_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct it6263 *it = bridge_to_it6263(bridge); + + regmap_write_bits(it->hdmi_regmap, HDMI_REG_GCP, AVMUTE, AVMUTE); + regmap_write(it->hdmi_regmap, HDMI_REG_PKT_GENERAL_CTRL, 0); + regmap_write(it->hdmi_regmap, HDMI_REG_AFE_DRV_CTRL, + AFE_DRV_RST | AFE_DRV_PWD); +} + +static void +it6263_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct drm_atomic_state *state = old_bridge_state->base.state; + struct it6263 *it = bridge_to_it6263(bridge); + const struct drm_crtc_state *crtc_state; + struct regmap *regmap = it->hdmi_regmap; + const struct drm_display_mode *mode; + struct drm_connector *connector; + bool is_stable = false; + struct drm_crtc *crtc; + unsigned int val; + bool pclk_high; + int i, ret; + + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + crtc = 
drm_atomic_get_new_connector_state(state, connector)->crtc; + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + mode = &crtc_state->adjusted_mode; + + regmap_write(regmap, HDMI_REG_HDMI_MODE, TX_HDMI_MODE); + + drm_atomic_helper_connector_hdmi_update_infoframes(connector, state); + + /* HDMI AFE setup */ + pclk_high = mode->clock > HIGH_PIXEL_CLOCK_KHZ; + regmap_write(regmap, HDMI_REG_AFE_DRV_CTRL, AFE_DRV_RST); + if (pclk_high) + regmap_write(regmap, HDMI_REG_AFE_XP_CTRL, + AFE_XP_GAINBIT | AFE_XP_RESETB); + else + regmap_write(regmap, HDMI_REG_AFE_XP_CTRL, + AFE_XP_ER0 | AFE_XP_RESETB); + regmap_write(regmap, HDMI_REG_AFE_ISW_CTRL, 0x10); + if (pclk_high) + regmap_write(regmap, HDMI_REG_AFE_IP_CTRL, + AFE_IP_GAINBIT | AFE_IP_RESETB); + else + regmap_write(regmap, HDMI_REG_AFE_IP_CTRL, + AFE_IP_ER0 | AFE_IP_RESETB); + + /* HDMI software video reset */ + regmap_write_bits(regmap, HDMI_REG_SW_RST, SOFTV_RST, SOFTV_RST); + fsleep(1000); + regmap_write_bits(regmap, HDMI_REG_SW_RST, SOFTV_RST, 0); + + /* reconfigure LVDS and retry several times in case the video is unstable */ + for (i = 0; i < 3; i++) { + ret = regmap_read_poll_timeout(regmap, HDMI_REG_SYS_STATUS, val, + val & TXVIDSTABLE, + 20000, 500000); + if (!ret) { + is_stable = true; + break; + } + + it6263_lvds_config(it); + } + + if (!is_stable) + dev_warn(it->dev, "failed to wait for video stable\n"); + + /* HDMI AFE reset release and power up */ + regmap_write(regmap, HDMI_REG_AFE_DRV_CTRL, 0); + + regmap_write_bits(regmap, HDMI_REG_GCP, AVMUTE, 0); + + regmap_write(regmap, HDMI_REG_PKT_GENERAL_CTRL, ENABLE_PKT | REPEAT_PKT); +} + +static enum drm_mode_status +it6263_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + unsigned long long rate; + + rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB); + if (rate == 0) + return MODE_NOCLOCK; + + return bridge->funcs->hdmi_tmds_char_rate_valid(bridge, mode, rate); +} + +static int it6263_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct it6263 *it = bridge_to_it6263(bridge); + struct drm_connector *connector; + int ret; + + ret = drm_bridge_attach(bridge->encoder, it->next_bridge, bridge, + flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret < 0) + return ret; + + if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) + return 0; + + connector = drm_bridge_connector_init(bridge->dev, bridge->encoder); + if (IS_ERR(connector)) { + ret = PTR_ERR(connector); + dev_err(it->dev, "failed to initialize bridge connector: %d\n", + ret); + return ret; + } + + drm_connector_attach_encoder(connector, bridge->encoder); + + return 0; +} + +static enum drm_connector_status it6263_bridge_detect(struct drm_bridge *bridge) +{ + struct it6263 *it = bridge_to_it6263(bridge); + + return it6263_detect(it); +} + +static const struct drm_edid * +it6263_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct it6263 *it = bridge_to_it6263(bridge); + + return drm_edid_read_custom(connector, it6263_read_edid, it); +} + +static u32 * +it6263_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + struct it6263 *it = bridge_to_it6263(bridge); + u32 *input_fmts; + + *num_input_fmts = 0; + + if (it->lvds_data_mapping != MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA) + return NULL; + + input_fmts =
kmalloc(sizeof(*input_fmts), GFP_KERNEL); + if (!input_fmts) + return NULL; + + input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA; + *num_input_fmts = 1; + + return input_fmts; +} + +static enum drm_mode_status +it6263_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge, + const struct drm_display_mode *mode, + unsigned long long tmds_rate) +{ + if (mode->clock > MAX_PIXEL_CLOCK_KHZ) + return MODE_CLOCK_HIGH; + + if (tmds_rate > MAX_HDMI_TMDS_CHAR_RATE_HZ) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static int it6263_hdmi_clear_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type) +{ + struct it6263 *it = bridge_to_it6263(bridge); + + if (type == HDMI_INFOFRAME_TYPE_AVI) + regmap_write(it->hdmi_regmap, HDMI_REG_AVI_INFOFRM_CTRL, 0); + else + dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type); + + return 0; +} + +static int it6263_hdmi_write_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type, + const u8 *buffer, size_t len) +{ + struct it6263 *it = bridge_to_it6263(bridge); + struct regmap *regmap = it->hdmi_regmap; + + if (type != HDMI_INFOFRAME_TYPE_AVI) { + dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type); + return 0; + } + + /* write the first AVI infoframe data byte chunk(DB1-DB5) */ + regmap_bulk_write(regmap, HDMI_REG_AVI_DB1, + &buffer[HDMI_INFOFRAME_HEADER_SIZE], + HDMI_AVI_DB_CHUNK1_SIZE); + + /* write the second AVI infoframe data byte chunk(DB6-DB13) */ + regmap_bulk_write(regmap, HDMI_REG_AVI_DB6, + &buffer[HDMI_INFOFRAME_HEADER_SIZE + + HDMI_AVI_DB_CHUNK1_SIZE], + HDMI_AVI_DB_CHUNK2_SIZE); + + /* write checksum */ + regmap_write(regmap, HDMI_REG_AVI_CSUM, buffer[3]); + + regmap_write(regmap, HDMI_REG_AVI_INFOFRM_CTRL, ENABLE_PKT | REPEAT_PKT); + + return 0; +} + +static const struct drm_bridge_funcs it6263_bridge_funcs = { + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .attach = it6263_bridge_attach, + .mode_valid = it6263_bridge_mode_valid, + .atomic_disable = it6263_bridge_atomic_disable, + .atomic_enable = it6263_bridge_atomic_enable, + .atomic_check = it6263_bridge_atomic_check, + .detect = it6263_bridge_detect, + .edid_read = it6263_bridge_edid_read, + .atomic_get_input_bus_fmts = it6263_bridge_atomic_get_input_bus_fmts, + .hdmi_tmds_char_rate_valid = it6263_hdmi_tmds_char_rate_valid, + .hdmi_clear_infoframe = it6263_hdmi_clear_infoframe, + .hdmi_write_infoframe = it6263_hdmi_write_infoframe, +}; + +static int it6263_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct gpio_desc *reset_gpio; + struct it6263 *it; + int ret; + + it = devm_kzalloc(dev, sizeof(*it), GFP_KERNEL); + if (!it) + return -ENOMEM; + + it->dev = dev; + it->hdmi_i2c = client; + + it->hdmi_regmap = devm_regmap_init_i2c(client, + &it6263_hdmi_regmap_config); + if (IS_ERR(it->hdmi_regmap)) + return dev_err_probe(dev, PTR_ERR(it->hdmi_regmap), + "failed to init I2C regmap for HDMI\n"); + + reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(reset_gpio)) + return dev_err_probe(dev, PTR_ERR(reset_gpio), + "failed to get reset gpio\n"); + + ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(it6263_supplies), + it6263_supplies); + if (ret) + return dev_err_probe(dev, ret, "failed to get power supplies\n"); + + ret = it6263_parse_dt(it); + if (ret) + return ret; + + it6263_hw_reset(reset_gpio); + + ret = it6263_lvds_set_i2c_addr(it); + if (ret) + 
return dev_err_probe(dev, ret, "failed to set I2C addr\n"); + + it->lvds_i2c = devm_i2c_new_dummy_device(dev, client->adapter, + LVDS_INPUT_CTRL_I2C_ADDR); + if (IS_ERR(it->lvds_i2c)) + return dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c), + "failed to allocate I2C device for LVDS\n"); + + it->lvds_regmap = devm_regmap_init_i2c(it->lvds_i2c, + &it6263_lvds_regmap_config); + if (IS_ERR(it->lvds_regmap)) + return dev_err_probe(dev, PTR_ERR(it->lvds_regmap), + "failed to init I2C regmap for LVDS\n"); + + it6263_lvds_config(it); + it6263_hdmi_config(it); + + i2c_set_clientdata(client, it); + + it->bridge.funcs = &it6263_bridge_funcs; + it->bridge.of_node = dev->of_node; + /* IT6263 chip doesn't support HPD interrupt. */ + it->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | + DRM_BRIDGE_OP_HDMI; + it->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + it->bridge.vendor = "ITE"; + it->bridge.product = "IT6263"; + + return devm_drm_bridge_add(dev, &it->bridge); +} + +static const struct of_device_id it6263_of_match[] = { + { .compatible = "ite,it6263", }, + { } +}; +MODULE_DEVICE_TABLE(of, it6263_of_match); + +static const struct i2c_device_id it6263_i2c_ids[] = { + { "it6263", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, it6263_i2c_ids); + +static struct i2c_driver it6263_driver = { + .probe = it6263_probe, + .driver = { + .name = "it6263", + .of_match_table = it6263_of_match, + }, + .id_table = it6263_i2c_ids, +}; +module_i2c_driver(it6263_driver); + +MODULE_DESCRIPTION("ITE Tech. Inc. IT6263 LVDS/HDMI bridge"); +MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index 7ff17aa14b01..008d86cc562a 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -2614,9 +2614,9 @@ static int it6505_poweron(struct it6505 *it6505) /* time interval between OVDD and SYSRSTN must be at least 10ms */ if (pdata->gpiod_reset) { usleep_range(10000, 20000); - gpiod_set_value_cansleep(pdata->gpiod_reset, 0); - usleep_range(1000, 2000); gpiod_set_value_cansleep(pdata->gpiod_reset, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(pdata->gpiod_reset, 0); usleep_range(25000, 35000); } @@ -2647,7 +2647,7 @@ static int it6505_poweroff(struct it6505 *it6505) disable_irq_nosync(it6505->irq); if (pdata->gpiod_reset) - gpiod_set_value_cansleep(pdata->gpiod_reset, 0); + gpiod_set_value_cansleep(pdata->gpiod_reset, 1); if (pdata->pwr18) { err = regulator_disable(pdata->pwr18); @@ -3135,7 +3135,7 @@ static int it6505_init_pdata(struct it6505 *it6505) return PTR_ERR(pdata->ovdd); } - pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(pdata->gpiod_reset)) { dev_err(dev, "gpiod_reset gpio not found"); return PTR_ERR(pdata->gpiod_reset); diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c index 925e42f46cd8..35ae3f0e8f51 100644 --- a/drivers/gpu/drm/bridge/ite-it66121.c +++ b/drivers/gpu/drm/bridge/ite-it66121.c @@ -770,8 +770,6 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge, mutex_lock(&ctx->lock); - hdmi_avi_infoframe_init(&ctx->hdmi_avi_infoframe); - ret = drm_hdmi_avi_infoframe_from_display_mode(&ctx->hdmi_avi_infoframe, ctx->connector, adjusted_mode); if (ret) { diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index 73983f9b50cb..1b31fdebe164 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++
b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -23,6 +23,8 @@ #include <drm/drm_of.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_state_helper.h> #define EDID_SEG_SIZE 256 #define EDID_LEN 32 @@ -333,49 +335,6 @@ end: return temp; } -static void lt9611_hdmi_set_infoframes(struct lt9611 *lt9611, - struct drm_connector *connector, - struct drm_display_mode *mode) -{ - union hdmi_infoframe infoframe; - ssize_t len; - u8 iframes = 0x0a; /* UD1 infoframe */ - u8 buf[32]; - int ret; - int i; - - ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi, - connector, - mode); - if (ret < 0) - goto out; - - len = hdmi_infoframe_pack(&infoframe, buf, sizeof(buf)); - if (len < 0) - goto out; - - for (i = 0; i < len; i++) - regmap_write(lt9611->regmap, 0x8440 + i, buf[i]); - - ret = drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi, - connector, - mode); - if (ret < 0) - goto out; - - len = hdmi_infoframe_pack(&infoframe, buf, sizeof(buf)); - if (len < 0) - goto out; - - for (i = 0; i < len; i++) - regmap_write(lt9611->regmap, 0x8474 + i, buf[i]); - - iframes |= 0x20; - -out: - regmap_write(lt9611->regmap, 0x843d, iframes); /* UD1 infoframe */ -} - static void lt9611_hdmi_tx_digital(struct lt9611 *lt9611, bool is_hdmi) { if (is_hdmi) @@ -719,7 +678,7 @@ lt9611_bridge_atomic_enable(struct drm_bridge *bridge, } lt9611_mipi_input_analog(lt9611); - lt9611_hdmi_set_infoframes(lt9611, connector, mode); + drm_atomic_helper_connector_hdmi_update_infoframes(connector, state); lt9611_hdmi_tx_digital(lt9611, connector->display_info.is_hdmi); lt9611_hdmi_tx_phy(lt9611); @@ -798,22 +757,25 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_mode *mode) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); + unsigned long long rate; if (mode->hdisplay > 3840) return MODE_BAD_HVALUE; - if (mode->vdisplay > 2160) - return MODE_BAD_VVALUE; - - if (mode->hdisplay == 3840 && - mode->vdisplay == 2160 && - drm_mode_vrefresh(mode) > 30) - return MODE_CLOCK_HIGH; - if (mode->hdisplay > 2000 && !lt9611->dsi1_node) return MODE_PANEL; - else - return MODE_OK; + + rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB); + return bridge->funcs->hdmi_tmds_char_rate_valid(bridge, mode, rate); +} + +static int lt9611_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + return drm_atomic_helper_connector_hdmi_check(conn_state->connector, + conn_state->state); } static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge, @@ -887,6 +849,99 @@ lt9611_atomic_get_input_bus_fmts(struct drm_bridge *bridge, return input_fmts; } +/* + * Other working frames: + * - 0x01, 0x84df + * - 0x04, 0x84c0 + */ +#define LT9611_INFOFRAME_AUDIO 0x02 +#define LT9611_INFOFRAME_AVI 0x08 +#define LT9611_INFOFRAME_SPD 0x10 +#define LT9611_INFOFRAME_VENDOR 0x20 + +static int lt9611_hdmi_clear_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type) +{ + struct lt9611 *lt9611 = bridge_to_lt9611(bridge); + unsigned int mask; + + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + mask = LT9611_INFOFRAME_AVI; + break; + + case HDMI_INFOFRAME_TYPE_SPD: + mask = LT9611_INFOFRAME_SPD; + break; + + case HDMI_INFOFRAME_TYPE_VENDOR: + mask = LT9611_INFOFRAME_VENDOR; + break; + + default: + drm_dbg_driver(lt9611->bridge.dev, "Unsupported HDMI InfoFrame %x\n", 
type); + mask = 0; + break; + } + + if (mask) + regmap_update_bits(lt9611->regmap, 0x843d, mask, 0); + + return 0; +} + +static int lt9611_hdmi_write_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type, + const u8 *buffer, size_t len) +{ + struct lt9611 *lt9611 = bridge_to_lt9611(bridge); + unsigned int mask, addr; + int i; + + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + mask = LT9611_INFOFRAME_AVI; + addr = 0x8440; + break; + + case HDMI_INFOFRAME_TYPE_SPD: + mask = LT9611_INFOFRAME_SPD; + addr = 0x8493; + break; + + case HDMI_INFOFRAME_TYPE_VENDOR: + mask = LT9611_INFOFRAME_VENDOR; + addr = 0x8474; + break; + + default: + drm_dbg_driver(lt9611->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type); + mask = 0; + break; + } + + if (mask) { + for (i = 0; i < len; i++) + regmap_write(lt9611->regmap, addr + i, buffer[i]); + + regmap_update_bits(lt9611->regmap, 0x843d, mask, mask); + } + + return 0; +} + +static enum drm_mode_status +lt9611_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge, + const struct drm_display_mode *mode, + unsigned long long tmds_rate) +{ + /* 297 MHz for 4k@30 mode */ + if (tmds_rate > 297000000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + static const struct drm_bridge_funcs lt9611_bridge_funcs = { .attach = lt9611_bridge_attach, .mode_valid = lt9611_bridge_mode_valid, @@ -894,6 +949,7 @@ static const struct drm_bridge_funcs lt9611_bridge_funcs = { .edid_read = lt9611_bridge_edid_read, .hpd_enable = lt9611_bridge_hpd_enable, + .atomic_check = lt9611_bridge_atomic_check, .atomic_pre_enable = lt9611_bridge_atomic_pre_enable, .atomic_enable = lt9611_bridge_atomic_enable, .atomic_disable = lt9611_bridge_atomic_disable, @@ -902,6 +958,10 @@ static const struct drm_bridge_funcs lt9611_bridge_funcs = { .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_get_input_bus_fmts = lt9611_atomic_get_input_bus_fmts, + + .hdmi_tmds_char_rate_valid = lt9611_hdmi_tmds_char_rate_valid, + .hdmi_write_infoframe = lt9611_hdmi_write_infoframe, + .hdmi_clear_infoframe = lt9611_hdmi_clear_infoframe, }; static int lt9611_parse_dt(struct device *dev, @@ -1116,8 +1176,11 @@ static int lt9611_probe(struct i2c_client *client) lt9611->bridge.funcs = &lt9611_bridge_funcs; lt9611->bridge.of_node = client->dev.of_node; lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | - DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES; + DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES | + DRM_BRIDGE_OP_HDMI; lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + lt9611->bridge.vendor = "Lontium"; + lt9611->bridge.product = "LT9611"; drm_bridge_add(&lt9611->bridge); diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c index 991732c4b629..389af0233fcd 100644 --- a/drivers/gpu/drm/bridge/lvds-codec.c +++ b/drivers/gpu/drm/bridge/lvds-codec.c @@ -236,7 +236,7 @@ MODULE_DEVICE_TABLE(of, lvds_codec_match); static struct platform_driver lvds_codec_driver = { .probe = lvds_codec_probe, - .remove_new = lvds_codec_remove, + .remove = lvds_codec_remove, .driver = { .name = "lvds-codec", .of_match_table = lvds_codec_match, diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index 5f05647a3bea..1e5b2a37cb8c 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -1211,7 +1211,7 @@ static void nwl_dsi_remove(struct platform_device *pdev) static struct platform_driver nwl_dsi_driver = { .probe = nwl_dsi_probe, - .remove_new = nwl_dsi_remove, +
.remove = nwl_dsi_remove, .driver = { .of_match_table = nwl_dsi_dt_ids, .name = DRV_NAME, diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c index 4416d0be7272..f8b4fb835765 100644 --- a/drivers/gpu/drm/bridge/samsung-dsim.c +++ b/drivers/gpu/drm/bridge/samsung-dsim.c @@ -2139,7 +2139,7 @@ MODULE_DEVICE_TABLE(of, samsung_dsim_of_match); static struct platform_driver samsung_dsim_driver = { .probe = samsung_dsim_probe, - .remove_new = samsung_dsim_remove, + .remove = samsung_dsim_remove, .driver = { .name = "samsung-dsim", .pm = pm_ptr(&samsung_dsim_pm_ops), diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 7f91b0db161e..9be9cc5b9025 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -180,6 +180,8 @@ struct sii902x { struct gpio_desc *reset_gpio; struct i2c_mux_core *i2cmux; bool sink_is_hdmi; + u32 bus_width; + /* * Mutex protects audio and video functions from interfering * each other, by keeping their i2c command sequences atomic. @@ -477,6 +479,8 @@ static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, u32 output_fmt, unsigned int *num_input_fmts) { + + struct sii902x *sii902x = bridge_to_sii902x(bridge); u32 *input_fmts; *num_input_fmts = 0; @@ -485,7 +489,20 @@ static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, if (!input_fmts) return NULL; - input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; + switch (sii902x->bus_width) { + case 16: + input_fmts[0] = MEDIA_BUS_FMT_RGB565_1X16; + break; + case 18: + input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X18; + break; + case 24: + input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; + break; + default: + kfree(input_fmts); + return NULL; + } + *num_input_fmts = 1; return input_fmts; @@ -1167,6 +1184,11 @@ static int sii902x_probe(struct i2c_client *client) return PTR_ERR(sii902x->reset_gpio); } + sii902x->bus_width = 24; + endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1); + if (endpoint) + of_property_read_u32(endpoint, "bus-width", &sii902x->bus_width); + endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1); if (endpoint) { struct device_node *remote = of_graph_get_remote_port_parent(endpoint); diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig index 15fc182d05ef..ca416dab156d 100644 --- a/drivers/gpu/drm/bridge/synopsys/Kconfig +++ b/drivers/gpu/drm/bridge/synopsys/Kconfig @@ -46,6 +46,14 @@ config DRM_DW_HDMI_CEC Support the CEC interface which is part of the Synopsys Designware HDMI block.
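Editor's aside, not part of the patches above: the sii902x hunk derives the single advertised input bus format from the endpoint's "bus-width" property, defaulting to 24. A minimal standalone sketch of that mapping follows; sii902x_input_fmt() is a hypothetical name, and strings stand in for the kernel's MEDIA_BUS_FMT_* constants so the example builds outside the kernel tree:

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for the driver's switch on sii902x->bus_width. */
static const char *sii902x_input_fmt(unsigned int bus_width)
{
	switch (bus_width) {
	case 16:
		return "MEDIA_BUS_FMT_RGB565_1X16";
	case 18:
		return "MEDIA_BUS_FMT_RGB666_1X18";
	case 24:
		return "MEDIA_BUS_FMT_RGB888_1X24";
	default:
		return NULL;	/* unsupported width: report no formats */
	}
}

int main(void)
{
	/* 24 is also the driver's default when "bus-width" is absent. */
	printf("bus-width 24 -> %s\n", sii902x_input_fmt(24));
	return 0;
}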
+config DRM_DW_HDMI_QP + tristate + select DRM_DISPLAY_HDMI_HELPER + select DRM_DISPLAY_HDMI_STATE_HELPER + select DRM_DISPLAY_HELPER + select DRM_KMS_HELPER + select REGMAP_MMIO + config DRM_DW_MIPI_DSI tristate select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile index ce715562e9e5..9869d9651ed1 100644 --- a/drivers/gpu/drm/bridge/synopsys/Makefile +++ b/drivers/gpu/drm/bridge/synopsys/Makefile @@ -5,4 +5,6 @@ obj-$(CONFIG_DRM_DW_HDMI_GP_AUDIO) += dw-hdmi-gp-audio.o obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o +obj-$(CONFIG_DRM_DW_HDMI_QP) += dw-hdmi-qp.o + obj-$(CONFIG_DRM_DW_MIPI_DSI) += dw-mipi-dsi.o diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c index 221e9a4edb40..cf1f66b7b192 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c @@ -645,7 +645,7 @@ static SIMPLE_DEV_PM_OPS(snd_dw_hdmi_pm, snd_dw_hdmi_suspend, static struct platform_driver snd_dw_hdmi_driver = { .probe = snd_dw_hdmi_probe, - .remove_new = snd_dw_hdmi_remove, + .remove = snd_dw_hdmi_remove, .driver = { .name = DRIVER_NAME, .pm = PM_OPS, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c index d4614de1ae1e..9549dabde941 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c @@ -346,7 +346,7 @@ static const struct dev_pm_ops dw_hdmi_cec_pm = { static struct platform_driver dw_hdmi_cec_driver = { .probe = dw_hdmi_cec_probe, - .remove_new = dw_hdmi_cec_remove, + .remove = dw_hdmi_cec_remove, .driver = { .name = "dw-hdmi-cec", .pm = pm_ptr(&dw_hdmi_cec_pm), diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c index 423762da2ab4..ab18f9a3bf23 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c @@ -181,7 +181,7 @@ static void snd_dw_hdmi_remove(struct platform_device *pdev) static struct platform_driver snd_dw_hdmi_driver = { .probe = snd_dw_hdmi_probe, - .remove_new = snd_dw_hdmi_remove, + .remove = snd_dw_hdmi_remove, .driver = { .name = DRIVER_NAME, }, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c index 26c187d20d97..f1c5a8d0fa90 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c @@ -225,7 +225,7 @@ static void snd_dw_hdmi_remove(struct platform_device *pdev) static struct platform_driver snd_dw_hdmi_driver = { .probe = snd_dw_hdmi_probe, - .remove_new = snd_dw_hdmi_remove, + .remove = snd_dw_hdmi_remove, .driver = { .name = DRIVER_NAME, }, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c new file mode 100644 index 000000000000..181c5164b231 --- /dev/null +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -0,0 +1,647 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2021-2022 Rockchip Electronics Co., Ltd. + * Copyright (c) 2024 Collabora Ltd. 
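 *
 * (Editor's overview, not part of the submitted patch: this new file
 * implements the drm_bridge side of the DesignWare HDMI QP TX controller:
 * register access through an MMIO regmap, a DDC I2C master exposed as a
 * host i2c_adapter, and AVI/DRM infoframe packing into the PKT_*_CONTENTS
 * registers, with all PHY handling delegated to platform glue through
 * dw_hdmi_qp_phy_ops.)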
+ * + * Author: Algea Cao <algea.cao@rock-chips.com> + * Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com> + */ +#include <linux/completion.h> +#include <linux/hdmi.h> +#include <linux/i2c.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/workqueue.h> + +#include <drm/bridge/dw_hdmi_qp.h> +#include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_state_helper.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_connector.h> +#include <drm/drm_edid.h> +#include <drm/drm_modes.h> + +#include <sound/hdmi-codec.h> + +#include "dw-hdmi-qp.h" + +#define DDC_CI_ADDR 0x37 +#define DDC_SEGMENT_ADDR 0x30 + +#define HDMI14_MAX_TMDSCLK 340000000 + +#define SCRAMB_POLL_DELAY_MS 3000 + +struct dw_hdmi_qp_i2c { + struct i2c_adapter adap; + + struct mutex lock; /* used to serialize data transfers */ + struct completion cmp; + u8 stat; + + u8 slave_reg; + bool is_regaddr; + bool is_segment; +}; + +struct dw_hdmi_qp { + struct drm_bridge bridge; + + struct device *dev; + struct dw_hdmi_qp_i2c *i2c; + + struct { + const struct dw_hdmi_qp_phy_ops *ops; + void *data; + } phy; + + struct regmap *regm; +}; + +static void dw_hdmi_qp_write(struct dw_hdmi_qp *hdmi, unsigned int val, + int offset) +{ + regmap_write(hdmi->regm, offset, val); +} + +static unsigned int dw_hdmi_qp_read(struct dw_hdmi_qp *hdmi, int offset) +{ + unsigned int val = 0; + + regmap_read(hdmi->regm, offset, &val); + + return val; +} + +static void dw_hdmi_qp_mod(struct dw_hdmi_qp *hdmi, unsigned int data, + unsigned int mask, unsigned int reg) +{ + regmap_update_bits(hdmi->regm, reg, mask, data); +} + +static int dw_hdmi_qp_i2c_read(struct dw_hdmi_qp *hdmi, + unsigned char *buf, unsigned int length) +{ + struct dw_hdmi_qp_i2c *i2c = hdmi->i2c; + int stat; + + if (!i2c->is_regaddr) { + dev_dbg(hdmi->dev, "set read register address to 0\n"); + i2c->slave_reg = 0x00; + i2c->is_regaddr = true; + } + + while (length--) { + reinit_completion(&i2c->cmp); + + dw_hdmi_qp_mod(hdmi, i2c->slave_reg++ << 12, I2CM_ADDR, + I2CM_INTERFACE_CONTROL0); + + if (i2c->is_segment) + dw_hdmi_qp_mod(hdmi, I2CM_EXT_READ, I2CM_WR_MASK, + I2CM_INTERFACE_CONTROL0); + else + dw_hdmi_qp_mod(hdmi, I2CM_FM_READ, I2CM_WR_MASK, + I2CM_INTERFACE_CONTROL0); + + stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10); + if (!stat) { + dev_err(hdmi->dev, "i2c read timed out\n"); + dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + return -EAGAIN; + } + + /* Check for error condition on the bus */ + if (i2c->stat & I2CM_NACK_RCVD_IRQ) { + dev_err(hdmi->dev, "i2c read error\n"); + dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + return -EIO; + } + + *buf++ = dw_hdmi_qp_read(hdmi, I2CM_INTERFACE_RDDATA_0_3) & 0xff; + dw_hdmi_qp_mod(hdmi, 0, I2CM_WR_MASK, I2CM_INTERFACE_CONTROL0); + } + + i2c->is_segment = false; + + return 0; +} + +static int dw_hdmi_qp_i2c_write(struct dw_hdmi_qp *hdmi, + unsigned char *buf, unsigned int length) +{ + struct dw_hdmi_qp_i2c *i2c = hdmi->i2c; + int stat; + + if (!i2c->is_regaddr) { + /* Use the first write byte as register address */ + i2c->slave_reg = buf[0]; + length--; + buf++; + i2c->is_regaddr = true; + } + + while (length--) { + reinit_completion(&i2c->cmp); + + dw_hdmi_qp_write(hdmi, *buf++, I2CM_INTERFACE_WRDATA_0_3); + dw_hdmi_qp_mod(hdmi, i2c->slave_reg++ << 12, I2CM_ADDR, + I2CM_INTERFACE_CONTROL0); + dw_hdmi_qp_mod(hdmi, I2CM_FM_WRITE, I2CM_WR_MASK, + I2CM_INTERFACE_CONTROL0); + + stat = 
wait_for_completion_timeout(&i2c->cmp, HZ / 10); + if (!stat) { + dev_err(hdmi->dev, "i2c write timed out\n"); + dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + return -EAGAIN; + } + + /* Check for error condition on the bus */ + if (i2c->stat & I2CM_NACK_RCVD_IRQ) { + dev_err(hdmi->dev, "i2c write nack\n"); + dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + return -EIO; + } + + dw_hdmi_qp_mod(hdmi, 0, I2CM_WR_MASK, I2CM_INTERFACE_CONTROL0); + } + + return 0; +} + +static int dw_hdmi_qp_i2c_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + struct dw_hdmi_qp *hdmi = i2c_get_adapdata(adap); + struct dw_hdmi_qp_i2c *i2c = hdmi->i2c; + u8 addr = msgs[0].addr; + int i, ret = 0; + + if (addr == DDC_CI_ADDR) + /* + * The internal I2C controller does not support the multi-byte + * read and write operations needed for DDC/CI. + * FIXME: Blacklist the DDC/CI address until we filter out + * unsupported I2C operations. + */ + return -EOPNOTSUPP; + + for (i = 0; i < num; i++) { + if (msgs[i].len == 0) { + dev_err(hdmi->dev, + "unsupported transfer %d/%d, no data\n", + i + 1, num); + return -EOPNOTSUPP; + } + } + + guard(mutex)(&i2c->lock); + + /* Unmute DONE and ERROR interrupts */ + dw_hdmi_qp_mod(hdmi, I2CM_NACK_RCVD_MASK_N | I2CM_OP_DONE_MASK_N, + I2CM_NACK_RCVD_MASK_N | I2CM_OP_DONE_MASK_N, + MAINUNIT_1_INT_MASK_N); + + /* Set slave device address taken from the first I2C message */ + if (addr == DDC_SEGMENT_ADDR && msgs[0].len == 1) + addr = DDC_ADDR; + + dw_hdmi_qp_mod(hdmi, addr << 5, I2CM_SLVADDR, I2CM_INTERFACE_CONTROL0); + + /* Set slave device register address on transfer */ + i2c->is_regaddr = false; + + /* Set segment pointer for I2C extended read mode operation */ + i2c->is_segment = false; + + for (i = 0; i < num; i++) { + if (msgs[i].addr == DDC_SEGMENT_ADDR && msgs[i].len == 1) { + i2c->is_segment = true; + dw_hdmi_qp_mod(hdmi, DDC_SEGMENT_ADDR, I2CM_SEG_ADDR, + I2CM_INTERFACE_CONTROL1); + dw_hdmi_qp_mod(hdmi, *msgs[i].buf << 7, I2CM_SEG_PTR, + I2CM_INTERFACE_CONTROL1); + } else { + if (msgs[i].flags & I2C_M_RD) + ret = dw_hdmi_qp_i2c_read(hdmi, msgs[i].buf, + msgs[i].len); + else + ret = dw_hdmi_qp_i2c_write(hdmi, msgs[i].buf, + msgs[i].len); + } + if (ret < 0) + break; + } + + if (!ret) + ret = num; + + /* Mute DONE and ERROR interrupts */ + dw_hdmi_qp_mod(hdmi, 0, I2CM_OP_DONE_MASK_N | I2CM_NACK_RCVD_MASK_N, + MAINUNIT_1_INT_MASK_N); + + return ret; +} + +static u32 dw_hdmi_qp_i2c_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm dw_hdmi_qp_algorithm = { + .master_xfer = dw_hdmi_qp_i2c_xfer, + .functionality = dw_hdmi_qp_i2c_func, +}; + +static struct i2c_adapter *dw_hdmi_qp_i2c_adapter(struct dw_hdmi_qp *hdmi) +{ + struct dw_hdmi_qp_i2c *i2c; + struct i2c_adapter *adap; + int ret; + + i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL); + if (!i2c) + return ERR_PTR(-ENOMEM); + + mutex_init(&i2c->lock); + init_completion(&i2c->cmp); + + adap = &i2c->adap; + adap->owner = THIS_MODULE; + adap->dev.parent = hdmi->dev; + adap->algo = &dw_hdmi_qp_algorithm; + strscpy(adap->name, "DesignWare HDMI QP", sizeof(adap->name)); + + i2c_set_adapdata(adap, hdmi); + + ret = devm_i2c_add_adapter(hdmi->dev, adap); + if (ret) { + dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name); + devm_kfree(hdmi->dev, i2c); + return ERR_PTR(ret); + } + + hdmi->i2c = i2c; + dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name); + + return adap; +} + +static int dw_hdmi_qp_config_avi_infoframe(struct
dw_hdmi_qp *hdmi, + const u8 *buffer, size_t len) +{ + u32 val, i, j; + + if (len != HDMI_INFOFRAME_SIZE(AVI)) { + dev_err(hdmi->dev, "failed to configure avi infoframe\n"); + return -EINVAL; + } + + /* + * DW HDMI QP IP uses a different byte format from standard AVI info + * frames, though generally the bits are in the correct bytes. + */ + val = buffer[1] << 8 | buffer[2] << 16; + dw_hdmi_qp_write(hdmi, val, PKT_AVI_CONTENTS0); + + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + if (i * 4 + j >= 14) + break; + if (!j) + val = buffer[i * 4 + j + 3]; + val |= buffer[i * 4 + j + 3] << (8 * j); + } + + dw_hdmi_qp_write(hdmi, val, PKT_AVI_CONTENTS1 + i * 4); + } + + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_AVI_FIELDRATE, PKTSCHED_PKT_CONFIG1); + + dw_hdmi_qp_mod(hdmi, PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, + PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, PKTSCHED_PKT_EN); + + return 0; +} + +static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi, + const u8 *buffer, size_t len) +{ + u32 val, i; + + if (len != HDMI_INFOFRAME_SIZE(DRM)) { + dev_err(hdmi->dev, "failed to configure drm infoframe\n"); + return -EINVAL; + } + + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN); + + val = buffer[1] << 8 | buffer[2] << 16; + dw_hdmi_qp_write(hdmi, val, PKT_DRMI_CONTENTS0); + + for (i = 0; i <= buffer[2]; i++) { + if (i % 4 == 0) + val = buffer[3 + i]; + val |= buffer[3 + i] << ((i % 4) * 8); + + if ((i % 4 == 3) || i == buffer[2]) + dw_hdmi_qp_write(hdmi, val, + PKT_DRMI_CONTENTS1 + ((i / 4) * 4)); + } + + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_FIELDRATE, PKTSCHED_PKT_CONFIG1); + dw_hdmi_qp_mod(hdmi, PKTSCHED_DRMI_TX_EN, PKTSCHED_DRMI_TX_EN, + PKTSCHED_PKT_EN); + + return 0; +} + +static int dw_hdmi_qp_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + int ret; + + ret = drm_atomic_helper_connector_hdmi_check(conn_state->connector, + conn_state->state); + if (ret) + dev_dbg(hdmi->dev, "%s failed: %d\n", __func__, ret); + + return ret; +} + +static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_state) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + struct drm_atomic_state *state = old_state->base.state; + struct drm_connector_state *conn_state; + struct drm_connector *connector; + unsigned int op_mode; + + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + if (WARN_ON(!connector)) + return; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (WARN_ON(!conn_state)) + return; + + if (connector->display_info.is_hdmi) { + dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n", + __func__, conn_state->hdmi.tmds_char_rate); + op_mode = 0; + } else { + dev_dbg(hdmi->dev, "%s mode=DVI\n", __func__); + op_mode = OPMODE_DVI; + } + + hdmi->phy.ops->init(hdmi, hdmi->phy.data); + + dw_hdmi_qp_mod(hdmi, HDCP2_BYPASS, HDCP2_BYPASS, HDCP2LOGIC_CONFIG0); + dw_hdmi_qp_mod(hdmi, op_mode, OPMODE_DVI, LINK_CONFIG0); + + drm_atomic_helper_connector_hdmi_update_infoframes(connector, state); +} + +static void dw_hdmi_qp_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_state) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + + hdmi->phy.ops->disable(hdmi, hdmi->phy.data); +} + +static enum drm_connector_status +dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge) +{ + struct dw_hdmi_qp *hdmi = 
bridge->driver_private; + + return hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data); +} + +static const struct drm_edid * +dw_hdmi_qp_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + const struct drm_edid *drm_edid; + + drm_edid = drm_edid_read_ddc(connector, bridge->ddc); + if (!drm_edid) + dev_dbg(hdmi->dev, "failed to get edid\n"); + + return drm_edid; +} + +static enum drm_mode_status +dw_hdmi_qp_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + unsigned long long rate; + + rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB); + if (rate > HDMI14_MAX_TMDSCLK) { + dev_dbg(hdmi->dev, "Unsupported mode clock: %d\n", mode->clock); + return MODE_CLOCK_HIGH; + } + + return MODE_OK; +} + +static int dw_hdmi_qp_bridge_clear_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, + PKTSCHED_PKT_EN); + break; + + case HDMI_INFOFRAME_TYPE_DRM: + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN); + break; + + default: + dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type); + } + + return 0; +} + +static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type, + const u8 *buffer, size_t len) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + + dw_hdmi_qp_bridge_clear_infoframe(bridge, type); + + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + return dw_hdmi_qp_config_avi_infoframe(hdmi, buffer, len); + + case HDMI_INFOFRAME_TYPE_DRM: + return dw_hdmi_qp_config_drm_infoframe(hdmi, buffer, len); + + default: + dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type); + return 0; + } +} + +static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = { + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_check = dw_hdmi_qp_bridge_atomic_check, + .atomic_enable = dw_hdmi_qp_bridge_atomic_enable, + .atomic_disable = dw_hdmi_qp_bridge_atomic_disable, + .detect = dw_hdmi_qp_bridge_detect, + .edid_read = dw_hdmi_qp_bridge_edid_read, + .mode_valid = dw_hdmi_qp_bridge_mode_valid, + .hdmi_clear_infoframe = dw_hdmi_qp_bridge_clear_infoframe, + .hdmi_write_infoframe = dw_hdmi_qp_bridge_write_infoframe, +}; + +static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id) +{ + struct dw_hdmi_qp *hdmi = dev_id; + struct dw_hdmi_qp_i2c *i2c = hdmi->i2c; + u32 stat; + + stat = dw_hdmi_qp_read(hdmi, MAINUNIT_1_INT_STATUS); + + i2c->stat = stat & (I2CM_OP_DONE_IRQ | I2CM_READ_REQUEST_IRQ | + I2CM_NACK_RCVD_IRQ); + + if (i2c->stat) { + dw_hdmi_qp_write(hdmi, i2c->stat, MAINUNIT_1_INT_CLEAR); + complete(&i2c->cmp); + } + + if (stat) + return IRQ_HANDLED; + + return IRQ_NONE; +} + +static const struct regmap_config dw_hdmi_qp_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = EARCRX_1_INT_FORCE, +}; + +static void dw_hdmi_qp_init_hw(struct dw_hdmi_qp *hdmi) +{ + dw_hdmi_qp_write(hdmi, 0, MAINUNIT_0_INT_MASK_N); + dw_hdmi_qp_write(hdmi, 0, MAINUNIT_1_INT_MASK_N); + dw_hdmi_qp_write(hdmi, 428571429, TIMER_BASE_CONFIG0); + + /* Software reset */ + 
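/*
 * (Editor's note, not part of the patch: the 0x01 write below is the
 * software reset named above; dw_hdmi_qp_init_hw() then programs the
 * fast-mode SCL timing, leaves the I2C master in standard mode with
 * I2CM_FM_EN cleared, clears stale DONE/NACK interrupts, and finally
 * lets the PHY glue install hot-plug detection through the optional
 * setup_hpd callback.)
 */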
dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + + dw_hdmi_qp_write(hdmi, 0x085c085c, I2CM_FM_SCL_CONFIG0); + + dw_hdmi_qp_mod(hdmi, 0, I2CM_FM_EN, I2CM_INTERFACE_CONTROL0); + + /* Clear DONE and ERROR interrupts */ + dw_hdmi_qp_write(hdmi, I2CM_OP_DONE_CLEAR | I2CM_NACK_RCVD_CLEAR, + MAINUNIT_1_INT_CLEAR); + + if (hdmi->phy.ops->setup_hpd) + hdmi->phy.ops->setup_hpd(hdmi, hdmi->phy.data); +} + +struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, + struct drm_encoder *encoder, + const struct dw_hdmi_qp_plat_data *plat_data) +{ + struct device *dev = &pdev->dev; + struct dw_hdmi_qp *hdmi; + void __iomem *regs; + int ret; + + if (!plat_data->phy_ops || !plat_data->phy_ops->init || + !plat_data->phy_ops->disable || !plat_data->phy_ops->read_hpd) { + dev_err(dev, "Missing platform PHY ops\n"); + return ERR_PTR(-ENODEV); + } + + hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return ERR_PTR(-ENOMEM); + + hdmi->dev = dev; + + regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regs)) + return ERR_CAST(regs); + + hdmi->regm = devm_regmap_init_mmio(dev, regs, &dw_hdmi_qp_regmap_config); + if (IS_ERR(hdmi->regm)) { + dev_err(dev, "Failed to configure regmap\n"); + return ERR_CAST(hdmi->regm); + } + + hdmi->phy.ops = plat_data->phy_ops; + hdmi->phy.data = plat_data->phy_data; + + dw_hdmi_qp_init_hw(hdmi); + + ret = devm_request_threaded_irq(dev, plat_data->main_irq, + dw_hdmi_qp_main_hardirq, NULL, + IRQF_SHARED, dev_name(dev), hdmi); + if (ret) + return ERR_PTR(ret); + + hdmi->bridge.driver_private = hdmi; + hdmi->bridge.funcs = &dw_hdmi_qp_bridge_funcs; + hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | + DRM_BRIDGE_OP_EDID | + DRM_BRIDGE_OP_HDMI | + DRM_BRIDGE_OP_HPD; + hdmi->bridge.of_node = pdev->dev.of_node; + hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + hdmi->bridge.vendor = "Synopsys"; + hdmi->bridge.product = "DW HDMI QP TX"; + + hdmi->bridge.ddc = dw_hdmi_qp_i2c_adapter(hdmi); + if (IS_ERR(hdmi->bridge.ddc)) + return ERR_CAST(hdmi->bridge.ddc); + + ret = devm_drm_bridge_add(dev, &hdmi->bridge); + if (ret) + return ERR_PTR(ret); + + ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + return ERR_PTR(ret); + + return hdmi; +} +EXPORT_SYMBOL_GPL(dw_hdmi_qp_bind); + +void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi) +{ + dw_hdmi_qp_init_hw(hdmi); +} +EXPORT_SYMBOL_GPL(dw_hdmi_qp_resume); + +MODULE_AUTHOR("Algea Cao <algea.cao@rock-chips.com>"); +MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@collabora.com>"); +MODULE_DESCRIPTION("DW HDMI QP transmitter library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h new file mode 100644 index 000000000000..2115b8ef0bd6 --- /dev/null +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Rockchip Electronics Co.Ltd + * Author: + * Algea Cao <algea.cao@rock-chips.com> + */ +#ifndef __DW_HDMI_QP_H__ +#define __DW_HDMI_QP_H__ + +#include <linux/bits.h> + +/* Main Unit Registers */ +#define CORE_ID 0x0 +#define VER_NUMBER 0x4 +#define VER_TYPE 0x8 +#define CONFIG_REG 0xc +#define CONFIG_CEC BIT(28) +#define CONFIG_AUD_UD BIT(23) +#define CORE_TIMESTAMP_HHMM 0x14 +#define CORE_TIMESTAMP_MMDD 0x18 +#define CORE_TIMESTAMP_YYYY 0x1c +/* Reset Manager Registers */ +#define GLOBAL_SWRESET_REQUEST 0x40 +#define EARCRX_CMDC_SWINIT_P BIT(27) +#define AVP_DATAPATH_PACKET_AUDIO_SWINIT_P BIT(10) +#define 
GLOBAL_SWDISABLE 0x44 +#define CEC_SWDISABLE BIT(17) +#define AVP_DATAPATH_PACKET_AUDIO_SWDISABLE BIT(10) +#define AVP_DATAPATH_VIDEO_SWDISABLE BIT(6) +#define RESET_MANAGER_CONFIG0 0x48 +#define RESET_MANAGER_STATUS0 0x50 +#define RESET_MANAGER_STATUS1 0x54 +#define RESET_MANAGER_STATUS2 0x58 +/* Timer Base Registers */ +#define TIMER_BASE_CONFIG0 0x80 +#define TIMER_BASE_STATUS0 0x84 +/* CMU Registers */ +#define CMU_CONFIG0 0xa0 +#define CMU_CONFIG1 0xa4 +#define CMU_CONFIG2 0xa8 +#define CMU_CONFIG3 0xac +#define CMU_STATUS 0xb0 +#define DISPLAY_CLK_MONITOR 0x3f +#define DISPLAY_CLK_LOCKED 0X15 +#define EARC_BPCLK_OFF BIT(9) +#define AUDCLK_OFF BIT(7) +#define LINKQPCLK_OFF BIT(5) +#define VIDQPCLK_OFF BIT(3) +#define IPI_CLK_OFF BIT(1) +#define CMU_IPI_CLK_FREQ 0xb4 +#define CMU_VIDQPCLK_FREQ 0xb8 +#define CMU_LINKQPCLK_FREQ 0xbc +#define CMU_AUDQPCLK_FREQ 0xc0 +#define CMU_EARC_BPCLK_FREQ 0xc4 +/* I2CM Registers */ +#define I2CM_SM_SCL_CONFIG0 0xe0 +#define I2CM_FM_SCL_CONFIG0 0xe4 +#define I2CM_CONFIG0 0xe8 +#define I2CM_CONTROL0 0xec +#define I2CM_STATUS0 0xf0 +#define I2CM_INTERFACE_CONTROL0 0xf4 +#define I2CM_ADDR 0xff000 +#define I2CM_SLVADDR 0xfe0 +#define I2CM_WR_MASK 0x1e +#define I2CM_EXT_READ BIT(4) +#define I2CM_SHORT_READ BIT(3) +#define I2CM_FM_READ BIT(2) +#define I2CM_FM_WRITE BIT(1) +#define I2CM_FM_EN BIT(0) +#define I2CM_INTERFACE_CONTROL1 0xf8 +#define I2CM_SEG_PTR 0x7f80 +#define I2CM_SEG_ADDR 0x7f +#define I2CM_INTERFACE_WRDATA_0_3 0xfc +#define I2CM_INTERFACE_WRDATA_4_7 0x100 +#define I2CM_INTERFACE_WRDATA_8_11 0x104 +#define I2CM_INTERFACE_WRDATA_12_15 0x108 +#define I2CM_INTERFACE_RDDATA_0_3 0x10c +#define I2CM_INTERFACE_RDDATA_4_7 0x110 +#define I2CM_INTERFACE_RDDATA_8_11 0x114 +#define I2CM_INTERFACE_RDDATA_12_15 0x118 +/* SCDC Registers */ +#define SCDC_CONFIG0 0x140 +#define SCDC_I2C_FM_EN BIT(12) +#define SCDC_UPD_FLAGS_AUTO_CLR BIT(6) +#define SCDC_UPD_FLAGS_POLL_EN BIT(4) +#define SCDC_CONTROL0 0x148 +#define SCDC_STATUS0 0x150 +#define STATUS_UPDATE BIT(0) +#define FRL_START BIT(4) +#define FLT_UPDATE BIT(5) +/* FLT Registers */ +#define FLT_CONFIG0 0x160 +#define FLT_CONFIG1 0x164 +#define FLT_CONFIG2 0x168 +#define FLT_CONTROL0 0x170 +/* Main Unit 2 Registers */ +#define MAINUNIT_STATUS0 0x180 +/* Video Interface Registers */ +#define VIDEO_INTERFACE_CONFIG0 0x800 +#define VIDEO_INTERFACE_CONFIG1 0x804 +#define VIDEO_INTERFACE_CONFIG2 0x808 +#define VIDEO_INTERFACE_CONTROL0 0x80c +#define VIDEO_INTERFACE_STATUS0 0x814 +/* Video Packing Registers */ +#define VIDEO_PACKING_CONFIG0 0x81c +/* Audio Interface Registers */ +#define AUDIO_INTERFACE_CONFIG0 0x820 +#define AUD_IF_SEL_MSK 0x3 +#define AUD_IF_SPDIF 0x2 +#define AUD_IF_I2S 0x1 +#define AUD_IF_PAI 0x0 +#define AUD_FIFO_INIT_ON_OVF_MSK BIT(2) +#define AUD_FIFO_INIT_ON_OVF_EN BIT(2) +#define I2S_LINES_EN_MSK GENMASK(7, 4) +#define I2S_LINES_EN(x) BIT((x) + 4) +#define I2S_BPCUV_RCV_MSK BIT(12) +#define I2S_BPCUV_RCV_EN BIT(12) +#define I2S_BPCUV_RCV_DIS 0 +#define SPDIF_LINES_EN GENMASK(19, 16) +#define AUD_FORMAT_MSK GENMASK(26, 24) +#define AUD_3DOBA (0x7 << 24) +#define AUD_3DASP (0x6 << 24) +#define AUD_MSOBA (0x5 << 24) +#define AUD_MSASP (0x4 << 24) +#define AUD_HBR (0x3 << 24) +#define AUD_DST (0x2 << 24) +#define AUD_OBA (0x1 << 24) +#define AUD_ASP (0x0 << 24) +#define AUDIO_INTERFACE_CONFIG1 0x824 +#define AUDIO_INTERFACE_CONTROL0 0x82c +#define AUDIO_FIFO_CLR_P BIT(0) +#define AUDIO_INTERFACE_STATUS0 0x834 +/* Frame Composer Registers */ +#define FRAME_COMPOSER_CONFIG0 0x840 +#define 
FRAME_COMPOSER_CONFIG1 0x844 +#define FRAME_COMPOSER_CONFIG2 0x848 +#define FRAME_COMPOSER_CONFIG3 0x84c +#define FRAME_COMPOSER_CONFIG4 0x850 +#define FRAME_COMPOSER_CONFIG5 0x854 +#define FRAME_COMPOSER_CONFIG6 0x858 +#define FRAME_COMPOSER_CONFIG7 0x85c +#define FRAME_COMPOSER_CONFIG8 0x860 +#define FRAME_COMPOSER_CONFIG9 0x864 +#define FRAME_COMPOSER_CONTROL0 0x86c +/* Video Monitor Registers */ +#define VIDEO_MONITOR_CONFIG0 0x880 +#define VIDEO_MONITOR_STATUS0 0x884 +#define VIDEO_MONITOR_STATUS1 0x888 +#define VIDEO_MONITOR_STATUS2 0x88c +#define VIDEO_MONITOR_STATUS3 0x890 +#define VIDEO_MONITOR_STATUS4 0x894 +#define VIDEO_MONITOR_STATUS5 0x898 +#define VIDEO_MONITOR_STATUS6 0x89c +/* HDCP2 Logic Registers */ +#define HDCP2LOGIC_CONFIG0 0x8e0 +#define HDCP2_BYPASS BIT(0) +#define HDCP2LOGIC_ESM_GPIO_IN 0x8e4 +#define HDCP2LOGIC_ESM_GPIO_OUT 0x8e8 +/* HDCP14 Registers */ +#define HDCP14_CONFIG0 0x900 +#define HDCP14_CONFIG1 0x904 +#define HDCP14_CONFIG2 0x908 +#define HDCP14_CONFIG3 0x90c +#define HDCP14_KEY_SEED 0x914 +#define HDCP14_KEY_H 0x918 +#define HDCP14_KEY_L 0x91c +#define HDCP14_KEY_STATUS 0x920 +#define HDCP14_AKSV_H 0x924 +#define HDCP14_AKSV_L 0x928 +#define HDCP14_AN_H 0x92c +#define HDCP14_AN_L 0x930 +#define HDCP14_STATUS0 0x934 +#define HDCP14_STATUS1 0x938 +/* Scrambler Registers */ +#define SCRAMB_CONFIG0 0x960 +/* Video Configuration Registers */ +#define LINK_CONFIG0 0x968 +#define OPMODE_FRL_4LANES BIT(8) +#define OPMODE_DVI BIT(4) +#define OPMODE_FRL BIT(0) +/* TMDS FIFO Registers */ +#define TMDS_FIFO_CONFIG0 0x970 +#define TMDS_FIFO_CONTROL0 0x974 +/* FRL RSFEC Registers */ +#define FRL_RSFEC_CONFIG0 0xa20 +#define FRL_RSFEC_STATUS0 0xa30 +/* FRL Packetizer Registers */ +#define FRL_PKTZ_CONFIG0 0xa40 +#define FRL_PKTZ_CONTROL0 0xa44 +#define FRL_PKTZ_CONTROL1 0xa50 +#define FRL_PKTZ_STATUS1 0xa54 +/* Packet Scheduler Registers */ +#define PKTSCHED_CONFIG0 0xa80 +#define PKTSCHED_PRQUEUE0_CONFIG0 0xa84 +#define PKTSCHED_PRQUEUE1_CONFIG0 0xa88 +#define PKTSCHED_PRQUEUE2_CONFIG0 0xa8c +#define PKTSCHED_PRQUEUE2_CONFIG1 0xa90 +#define PKTSCHED_PRQUEUE2_CONFIG2 0xa94 +#define PKTSCHED_PKT_CONFIG0 0xa98 +#define PKTSCHED_PKT_CONFIG1 0xa9c +#define PKTSCHED_DRMI_FIELDRATE BIT(13) +#define PKTSCHED_AVI_FIELDRATE BIT(12) +#define PKTSCHED_PKT_CONFIG2 0xaa0 +#define PKTSCHED_PKT_CONFIG3 0xaa4 +#define PKTSCHED_PKT_EN 0xaa8 +#define PKTSCHED_DRMI_TX_EN BIT(17) +#define PKTSCHED_AUDI_TX_EN BIT(15) +#define PKTSCHED_AVI_TX_EN BIT(13) +#define PKTSCHED_EMP_CVTEM_TX_EN BIT(10) +#define PKTSCHED_AMD_TX_EN BIT(8) +#define PKTSCHED_GCP_TX_EN BIT(3) +#define PKTSCHED_AUDS_TX_EN BIT(2) +#define PKTSCHED_ACR_TX_EN BIT(1) +#define PKTSCHED_NULL_TX_EN BIT(0) +#define PKTSCHED_PKT_CONTROL0 0xaac +#define PKTSCHED_PKT_SEND 0xab0 +#define PKTSCHED_PKT_STATUS0 0xab4 +#define PKTSCHED_PKT_STATUS1 0xab8 +#define PKT_NULL_CONTENTS0 0xb00 +#define PKT_NULL_CONTENTS1 0xb04 +#define PKT_NULL_CONTENTS2 0xb08 +#define PKT_NULL_CONTENTS3 0xb0c +#define PKT_NULL_CONTENTS4 0xb10 +#define PKT_NULL_CONTENTS5 0xb14 +#define PKT_NULL_CONTENTS6 0xb18 +#define PKT_NULL_CONTENTS7 0xb1c +#define PKT_ACP_CONTENTS0 0xb20 +#define PKT_ACP_CONTENTS1 0xb24 +#define PKT_ACP_CONTENTS2 0xb28 +#define PKT_ACP_CONTENTS3 0xb2c +#define PKT_ACP_CONTENTS4 0xb30 +#define PKT_ACP_CONTENTS5 0xb34 +#define PKT_ACP_CONTENTS6 0xb38 +#define PKT_ACP_CONTENTS7 0xb3c +#define PKT_ISRC1_CONTENTS0 0xb40 +#define PKT_ISRC1_CONTENTS1 0xb44 +#define PKT_ISRC1_CONTENTS2 0xb48 +#define PKT_ISRC1_CONTENTS3 0xb4c +#define 
PKT_ISRC1_CONTENTS4 0xb50 +#define PKT_ISRC1_CONTENTS5 0xb54 +#define PKT_ISRC1_CONTENTS6 0xb58 +#define PKT_ISRC1_CONTENTS7 0xb5c +#define PKT_ISRC2_CONTENTS0 0xb60 +#define PKT_ISRC2_CONTENTS1 0xb64 +#define PKT_ISRC2_CONTENTS2 0xb68 +#define PKT_ISRC2_CONTENTS3 0xb6c +#define PKT_ISRC2_CONTENTS4 0xb70 +#define PKT_ISRC2_CONTENTS5 0xb74 +#define PKT_ISRC2_CONTENTS6 0xb78 +#define PKT_ISRC2_CONTENTS7 0xb7c +#define PKT_GMD_CONTENTS0 0xb80 +#define PKT_GMD_CONTENTS1 0xb84 +#define PKT_GMD_CONTENTS2 0xb88 +#define PKT_GMD_CONTENTS3 0xb8c +#define PKT_GMD_CONTENTS4 0xb90 +#define PKT_GMD_CONTENTS5 0xb94 +#define PKT_GMD_CONTENTS6 0xb98 +#define PKT_GMD_CONTENTS7 0xb9c +#define PKT_AMD_CONTENTS0 0xba0 +#define PKT_AMD_CONTENTS1 0xba4 +#define PKT_AMD_CONTENTS2 0xba8 +#define PKT_AMD_CONTENTS3 0xbac +#define PKT_AMD_CONTENTS4 0xbb0 +#define PKT_AMD_CONTENTS5 0xbb4 +#define PKT_AMD_CONTENTS6 0xbb8 +#define PKT_AMD_CONTENTS7 0xbbc +#define PKT_VSI_CONTENTS0 0xbc0 +#define PKT_VSI_CONTENTS1 0xbc4 +#define PKT_VSI_CONTENTS2 0xbc8 +#define PKT_VSI_CONTENTS3 0xbcc +#define PKT_VSI_CONTENTS4 0xbd0 +#define PKT_VSI_CONTENTS5 0xbd4 +#define PKT_VSI_CONTENTS6 0xbd8 +#define PKT_VSI_CONTENTS7 0xbdc +#define PKT_AVI_CONTENTS0 0xbe0 +#define HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT BIT(4) +#define HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR 0x04 +#define HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR 0x08 +#define HDMI_FC_AVICONF2_IT_CONTENT_VALID 0x80 +#define PKT_AVI_CONTENTS1 0xbe4 +#define PKT_AVI_CONTENTS2 0xbe8 +#define PKT_AVI_CONTENTS3 0xbec +#define PKT_AVI_CONTENTS4 0xbf0 +#define PKT_AVI_CONTENTS5 0xbf4 +#define PKT_AVI_CONTENTS6 0xbf8 +#define PKT_AVI_CONTENTS7 0xbfc +#define PKT_SPDI_CONTENTS0 0xc00 +#define PKT_SPDI_CONTENTS1 0xc04 +#define PKT_SPDI_CONTENTS2 0xc08 +#define PKT_SPDI_CONTENTS3 0xc0c +#define PKT_SPDI_CONTENTS4 0xc10 +#define PKT_SPDI_CONTENTS5 0xc14 +#define PKT_SPDI_CONTENTS6 0xc18 +#define PKT_SPDI_CONTENTS7 0xc1c +#define PKT_AUDI_CONTENTS0 0xc20 +#define PKT_AUDI_CONTENTS1 0xc24 +#define PKT_AUDI_CONTENTS2 0xc28 +#define PKT_AUDI_CONTENTS3 0xc2c +#define PKT_AUDI_CONTENTS4 0xc30 +#define PKT_AUDI_CONTENTS5 0xc34 +#define PKT_AUDI_CONTENTS6 0xc38 +#define PKT_AUDI_CONTENTS7 0xc3c +#define PKT_NVI_CONTENTS0 0xc40 +#define PKT_NVI_CONTENTS1 0xc44 +#define PKT_NVI_CONTENTS2 0xc48 +#define PKT_NVI_CONTENTS3 0xc4c +#define PKT_NVI_CONTENTS4 0xc50 +#define PKT_NVI_CONTENTS5 0xc54 +#define PKT_NVI_CONTENTS6 0xc58 +#define PKT_NVI_CONTENTS7 0xc5c +#define PKT_DRMI_CONTENTS0 0xc60 +#define PKT_DRMI_CONTENTS1 0xc64 +#define PKT_DRMI_CONTENTS2 0xc68 +#define PKT_DRMI_CONTENTS3 0xc6c +#define PKT_DRMI_CONTENTS4 0xc70 +#define PKT_DRMI_CONTENTS5 0xc74 +#define PKT_DRMI_CONTENTS6 0xc78 +#define PKT_DRMI_CONTENTS7 0xc7c +#define PKT_GHDMI1_CONTENTS0 0xc80 +#define PKT_GHDMI1_CONTENTS1 0xc84 +#define PKT_GHDMI1_CONTENTS2 0xc88 +#define PKT_GHDMI1_CONTENTS3 0xc8c +#define PKT_GHDMI1_CONTENTS4 0xc90 +#define PKT_GHDMI1_CONTENTS5 0xc94 +#define PKT_GHDMI1_CONTENTS6 0xc98 +#define PKT_GHDMI1_CONTENTS7 0xc9c +#define PKT_GHDMI2_CONTENTS0 0xca0 +#define PKT_GHDMI2_CONTENTS1 0xca4 +#define PKT_GHDMI2_CONTENTS2 0xca8 +#define PKT_GHDMI2_CONTENTS3 0xcac +#define PKT_GHDMI2_CONTENTS4 0xcb0 +#define PKT_GHDMI2_CONTENTS5 0xcb4 +#define PKT_GHDMI2_CONTENTS6 0xcb8 +#define PKT_GHDMI2_CONTENTS7 0xcbc +/* EMP Packetizer Registers */ +#define PKT_EMP_CONFIG0 0xce0 +#define PKT_EMP_CONTROL0 0xcec +#define PKT_EMP_CONTROL1 0xcf0 +#define PKT_EMP_CONTROL2 0xcf4 +#define PKT_EMP_VTEM_CONTENTS0 0xd00 +#define 
PKT_EMP_VTEM_CONTENTS1 0xd04 +#define PKT_EMP_VTEM_CONTENTS2 0xd08 +#define PKT_EMP_VTEM_CONTENTS3 0xd0c +#define PKT_EMP_VTEM_CONTENTS4 0xd10 +#define PKT_EMP_VTEM_CONTENTS5 0xd14 +#define PKT_EMP_VTEM_CONTENTS6 0xd18 +#define PKT_EMP_VTEM_CONTENTS7 0xd1c +#define PKT0_EMP_CVTEM_CONTENTS0 0xd20 +#define PKT0_EMP_CVTEM_CONTENTS1 0xd24 +#define PKT0_EMP_CVTEM_CONTENTS2 0xd28 +#define PKT0_EMP_CVTEM_CONTENTS3 0xd2c +#define PKT0_EMP_CVTEM_CONTENTS4 0xd30 +#define PKT0_EMP_CVTEM_CONTENTS5 0xd34 +#define PKT0_EMP_CVTEM_CONTENTS6 0xd38 +#define PKT0_EMP_CVTEM_CONTENTS7 0xd3c +#define PKT1_EMP_CVTEM_CONTENTS0 0xd40 +#define PKT1_EMP_CVTEM_CONTENTS1 0xd44 +#define PKT1_EMP_CVTEM_CONTENTS2 0xd48 +#define PKT1_EMP_CVTEM_CONTENTS3 0xd4c +#define PKT1_EMP_CVTEM_CONTENTS4 0xd50 +#define PKT1_EMP_CVTEM_CONTENTS5 0xd54 +#define PKT1_EMP_CVTEM_CONTENTS6 0xd58 +#define PKT1_EMP_CVTEM_CONTENTS7 0xd5c +#define PKT2_EMP_CVTEM_CONTENTS0 0xd60 +#define PKT2_EMP_CVTEM_CONTENTS1 0xd64 +#define PKT2_EMP_CVTEM_CONTENTS2 0xd68 +#define PKT2_EMP_CVTEM_CONTENTS3 0xd6c +#define PKT2_EMP_CVTEM_CONTENTS4 0xd70 +#define PKT2_EMP_CVTEM_CONTENTS5 0xd74 +#define PKT2_EMP_CVTEM_CONTENTS6 0xd78 +#define PKT2_EMP_CVTEM_CONTENTS7 0xd7c +#define PKT3_EMP_CVTEM_CONTENTS0 0xd80 +#define PKT3_EMP_CVTEM_CONTENTS1 0xd84 +#define PKT3_EMP_CVTEM_CONTENTS2 0xd88 +#define PKT3_EMP_CVTEM_CONTENTS3 0xd8c +#define PKT3_EMP_CVTEM_CONTENTS4 0xd90 +#define PKT3_EMP_CVTEM_CONTENTS5 0xd94 +#define PKT3_EMP_CVTEM_CONTENTS6 0xd98 +#define PKT3_EMP_CVTEM_CONTENTS7 0xd9c +#define PKT4_EMP_CVTEM_CONTENTS0 0xda0 +#define PKT4_EMP_CVTEM_CONTENTS1 0xda4 +#define PKT4_EMP_CVTEM_CONTENTS2 0xda8 +#define PKT4_EMP_CVTEM_CONTENTS3 0xdac +#define PKT4_EMP_CVTEM_CONTENTS4 0xdb0 +#define PKT4_EMP_CVTEM_CONTENTS5 0xdb4 +#define PKT4_EMP_CVTEM_CONTENTS6 0xdb8 +#define PKT4_EMP_CVTEM_CONTENTS7 0xdbc +#define PKT5_EMP_CVTEM_CONTENTS0 0xdc0 +#define PKT5_EMP_CVTEM_CONTENTS1 0xdc4 +#define PKT5_EMP_CVTEM_CONTENTS2 0xdc8 +#define PKT5_EMP_CVTEM_CONTENTS3 0xdcc +#define PKT5_EMP_CVTEM_CONTENTS4 0xdd0 +#define PKT5_EMP_CVTEM_CONTENTS5 0xdd4 +#define PKT5_EMP_CVTEM_CONTENTS6 0xdd8 +#define PKT5_EMP_CVTEM_CONTENTS7 0xddc +/* Audio Packetizer Registers */ +#define AUDPKT_CONTROL0 0xe20 +#define AUDPKT_PBIT_FORCE_EN_MASK BIT(12) +#define AUDPKT_PBIT_FORCE_EN BIT(12) +#define AUDPKT_CHSTATUS_OVR_EN_MASK BIT(0) +#define AUDPKT_CHSTATUS_OVR_EN BIT(0) +#define AUDPKT_CONTROL1 0xe24 +#define AUDPKT_ACR_CONTROL0 0xe40 +#define AUDPKT_ACR_N_VALUE 0xfffff +#define AUDPKT_ACR_CONTROL1 0xe44 +#define AUDPKT_ACR_CTS_OVR_VAL_MSK GENMASK(23, 4) +#define AUDPKT_ACR_CTS_OVR_VAL(x) ((x) << 4) +#define AUDPKT_ACR_CTS_OVR_EN_MSK BIT(1) +#define AUDPKT_ACR_CTS_OVR_EN BIT(1) +#define AUDPKT_ACR_STATUS0 0xe4c +#define AUDPKT_CHSTATUS_OVR0 0xe60 +#define AUDPKT_CHSTATUS_OVR1 0xe64 +/* IEC60958 Byte 3: Sampling frequency Bits 24 to 27 */ +#define AUDPKT_CHSTATUS_SR_MASK GENMASK(3, 0) +#define AUDPKT_CHSTATUS_SR_22050 0x4 +#define AUDPKT_CHSTATUS_SR_24000 0x6 +#define AUDPKT_CHSTATUS_SR_32000 0x3 +#define AUDPKT_CHSTATUS_SR_44100 0x0 +#define AUDPKT_CHSTATUS_SR_48000 0x2 +#define AUDPKT_CHSTATUS_SR_88200 0x8 +#define AUDPKT_CHSTATUS_SR_96000 0xa +#define AUDPKT_CHSTATUS_SR_176400 0xc +#define AUDPKT_CHSTATUS_SR_192000 0xe +#define AUDPKT_CHSTATUS_SR_768000 0x9 +#define AUDPKT_CHSTATUS_SR_NOT_INDICATED 0x1 +/* IEC60958 Byte 4: Original sampling frequency Bits 36 to 39 */ +#define AUDPKT_CHSTATUS_0SR_MASK GENMASK(15, 12) +#define AUDPKT_CHSTATUS_OSR_8000 0x6 +#define AUDPKT_CHSTATUS_OSR_11025 0xa
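/*
 * (Editor's sketch, not part of the patch: a hypothetical helper mapping a
 * sample rate in Hz to the IEC 60958 byte-3 code above; nothing in this
 * file programs these fields yet:
 *
 *	static inline u32 audpkt_chstatus_sr(unsigned int rate)
 *	{
 *		switch (rate) {
 *		case 32000:  return AUDPKT_CHSTATUS_SR_32000;
 *		case 44100:  return AUDPKT_CHSTATUS_SR_44100;
 *		case 48000:  return AUDPKT_CHSTATUS_SR_48000;
 *		case 88200:  return AUDPKT_CHSTATUS_SR_88200;
 *		case 96000:  return AUDPKT_CHSTATUS_SR_96000;
 *		case 176400: return AUDPKT_CHSTATUS_SR_176400;
 *		case 192000: return AUDPKT_CHSTATUS_SR_192000;
 *		default:     return AUDPKT_CHSTATUS_SR_NOT_INDICATED;
 *		}
 *	}
 * )
 */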
+#define AUDPKT_CHSTATUS_OSR_12000 0x2 +#define AUDPKT_CHSTATUS_OSR_16000 0x8 +#define AUDPKT_CHSTATUS_OSR_22050 0xb +#define AUDPKT_CHSTATUS_OSR_24000 0x9 +#define AUDPKT_CHSTATUS_OSR_32000 0xc +#define AUDPKT_CHSTATUS_OSR_44100 0xf +#define AUDPKT_CHSTATUS_OSR_48000 0xd +#define AUDPKT_CHSTATUS_OSR_88200 0x7 +#define AUDPKT_CHSTATUS_OSR_96000 0x5 +#define AUDPKT_CHSTATUS_OSR_176400 0x3 +#define AUDPKT_CHSTATUS_OSR_192000 0x1 +#define AUDPKT_CHSTATUS_OSR_NOT_INDICATED 0x0 +#define AUDPKT_CHSTATUS_OVR2 0xe68 +#define AUDPKT_CHSTATUS_OVR3 0xe6c +#define AUDPKT_CHSTATUS_OVR4 0xe70 +#define AUDPKT_CHSTATUS_OVR5 0xe74 +#define AUDPKT_CHSTATUS_OVR6 0xe78 +#define AUDPKT_CHSTATUS_OVR7 0xe7c +#define AUDPKT_CHSTATUS_OVR8 0xe80 +#define AUDPKT_CHSTATUS_OVR9 0xe84 +#define AUDPKT_CHSTATUS_OVR10 0xe88 +#define AUDPKT_CHSTATUS_OVR11 0xe8c +#define AUDPKT_CHSTATUS_OVR12 0xe90 +#define AUDPKT_CHSTATUS_OVR13 0xe94 +#define AUDPKT_CHSTATUS_OVR14 0xe98 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC0 0xea0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC1 0xea4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC2 0xea8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC3 0xeac +#define AUDPKT_USRDATA_OVR_MSG_GENERIC4 0xeb0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC5 0xeb4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC6 0xeb8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC7 0xebc +#define AUDPKT_USRDATA_OVR_MSG_GENERIC8 0xec0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC9 0xec4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC10 0xec8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC11 0xecc +#define AUDPKT_USRDATA_OVR_MSG_GENERIC12 0xed0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC13 0xed4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC14 0xed8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC15 0xedc +#define AUDPKT_USRDATA_OVR_MSG_GENERIC16 0xee0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC17 0xee4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC18 0xee8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC19 0xeec +#define AUDPKT_USRDATA_OVR_MSG_GENERIC20 0xef0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC21 0xef4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC22 0xef8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC23 0xefc +#define AUDPKT_USRDATA_OVR_MSG_GENERIC24 0xf00 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC25 0xf04 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC26 0xf08 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC27 0xf0c +#define AUDPKT_USRDATA_OVR_MSG_GENERIC28 0xf10 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC29 0xf14 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC30 0xf18 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC31 0xf1c +#define AUDPKT_USRDATA_OVR_MSG_GENERIC32 0xf20 +#define AUDPKT_VBIT_OVR0 0xf24 +/* CEC Registers */ +#define CEC_TX_CONTROL 0x1000 +#define CEC_STATUS 0x1004 +#define CEC_CONFIG 0x1008 +#define CEC_ADDR 0x100c +#define CEC_TX_COUNT 0x1020 +#define CEC_TX_DATA3_0 0x1024 +#define CEC_TX_DATA7_4 0x1028 +#define CEC_TX_DATA11_8 0x102c +#define CEC_TX_DATA15_12 0x1030 +#define CEC_RX_COUNT_STATUS 0x1040 +#define CEC_RX_DATA3_0 0x1044 +#define CEC_RX_DATA7_4 0x1048 +#define CEC_RX_DATA11_8 0x104c +#define CEC_RX_DATA15_12 0x1050 +#define CEC_LOCK_CONTROL 0x1054 +#define CEC_RXQUAL_BITTIME_CONFIG 0x1060 +#define CEC_RX_BITTIME_CONFIG 0x1064 +#define CEC_TX_BITTIME_CONFIG 0x1068 +/* eARC RX CMDC Registers */ +#define EARCRX_CMDC_CONFIG0 0x1800 +#define EARCRX_XACTREAD_STOP_CFG BIT(26) +#define EARCRX_XACTREAD_RETRY_CFG BIT(25) +#define EARCRX_CMDC_DSCVR_EARCVALID0_TO_DISC1 BIT(24) +#define EARCRX_CMDC_XACT_RESTART_EN BIT(18) +#define EARCRX_CMDC_CONFIG1 0x1804 +#define EARCRX_CMDC_CONTROL 0x1808 +#define EARCRX_CMDC_HEARTBEAT_LOSS_EN BIT(4) +#define 
EARCRX_CMDC_DISCOVERY_EN BIT(3) +#define EARCRX_CONNECTOR_HPD BIT(1) +#define EARCRX_CMDC_WHITELIST0_CONFIG 0x180c +#define EARCRX_CMDC_WHITELIST1_CONFIG 0x1810 +#define EARCRX_CMDC_WHITELIST2_CONFIG 0x1814 +#define EARCRX_CMDC_WHITELIST3_CONFIG 0x1818 +#define EARCRX_CMDC_STATUS 0x181c +#define EARCRX_CMDC_XACT_INFO 0x1820 +#define EARCRX_CMDC_XACT_ACTION 0x1824 +#define EARCRX_CMDC_HEARTBEAT_RXSTAT_SE 0x1828 +#define EARCRX_CMDC_HEARTBEAT_STATUS 0x182c +#define EARCRX_CMDC_XACT_WR0 0x1840 +#define EARCRX_CMDC_XACT_WR1 0x1844 +#define EARCRX_CMDC_XACT_WR2 0x1848 +#define EARCRX_CMDC_XACT_WR3 0x184c +#define EARCRX_CMDC_XACT_WR4 0x1850 +#define EARCRX_CMDC_XACT_WR5 0x1854 +#define EARCRX_CMDC_XACT_WR6 0x1858 +#define EARCRX_CMDC_XACT_WR7 0x185c +#define EARCRX_CMDC_XACT_WR8 0x1860 +#define EARCRX_CMDC_XACT_WR9 0x1864 +#define EARCRX_CMDC_XACT_WR10 0x1868 +#define EARCRX_CMDC_XACT_WR11 0x186c +#define EARCRX_CMDC_XACT_WR12 0x1870 +#define EARCRX_CMDC_XACT_WR13 0x1874 +#define EARCRX_CMDC_XACT_WR14 0x1878 +#define EARCRX_CMDC_XACT_WR15 0x187c +#define EARCRX_CMDC_XACT_WR16 0x1880 +#define EARCRX_CMDC_XACT_WR17 0x1884 +#define EARCRX_CMDC_XACT_WR18 0x1888 +#define EARCRX_CMDC_XACT_WR19 0x188c +#define EARCRX_CMDC_XACT_WR20 0x1890 +#define EARCRX_CMDC_XACT_WR21 0x1894 +#define EARCRX_CMDC_XACT_WR22 0x1898 +#define EARCRX_CMDC_XACT_WR23 0x189c +#define EARCRX_CMDC_XACT_WR24 0x18a0 +#define EARCRX_CMDC_XACT_WR25 0x18a4 +#define EARCRX_CMDC_XACT_WR26 0x18a8 +#define EARCRX_CMDC_XACT_WR27 0x18ac +#define EARCRX_CMDC_XACT_WR28 0x18b0 +#define EARCRX_CMDC_XACT_WR29 0x18b4 +#define EARCRX_CMDC_XACT_WR30 0x18b8 +#define EARCRX_CMDC_XACT_WR31 0x18bc +#define EARCRX_CMDC_XACT_WR32 0x18c0 +#define EARCRX_CMDC_XACT_WR33 0x18c4 +#define EARCRX_CMDC_XACT_WR34 0x18c8 +#define EARCRX_CMDC_XACT_WR35 0x18cc +#define EARCRX_CMDC_XACT_WR36 0x18d0 +#define EARCRX_CMDC_XACT_WR37 0x18d4 +#define EARCRX_CMDC_XACT_WR38 0x18d8 +#define EARCRX_CMDC_XACT_WR39 0x18dc +#define EARCRX_CMDC_XACT_WR40 0x18e0 +#define EARCRX_CMDC_XACT_WR41 0x18e4 +#define EARCRX_CMDC_XACT_WR42 0x18e8 +#define EARCRX_CMDC_XACT_WR43 0x18ec +#define EARCRX_CMDC_XACT_WR44 0x18f0 +#define EARCRX_CMDC_XACT_WR45 0x18f4 +#define EARCRX_CMDC_XACT_WR46 0x18f8 +#define EARCRX_CMDC_XACT_WR47 0x18fc +#define EARCRX_CMDC_XACT_WR48 0x1900 +#define EARCRX_CMDC_XACT_WR49 0x1904 +#define EARCRX_CMDC_XACT_WR50 0x1908 +#define EARCRX_CMDC_XACT_WR51 0x190c +#define EARCRX_CMDC_XACT_WR52 0x1910 +#define EARCRX_CMDC_XACT_WR53 0x1914 +#define EARCRX_CMDC_XACT_WR54 0x1918 +#define EARCRX_CMDC_XACT_WR55 0x191c +#define EARCRX_CMDC_XACT_WR56 0x1920 +#define EARCRX_CMDC_XACT_WR57 0x1924 +#define EARCRX_CMDC_XACT_WR58 0x1928 +#define EARCRX_CMDC_XACT_WR59 0x192c +#define EARCRX_CMDC_XACT_WR60 0x1930 +#define EARCRX_CMDC_XACT_WR61 0x1934 +#define EARCRX_CMDC_XACT_WR62 0x1938 +#define EARCRX_CMDC_XACT_WR63 0x193c +#define EARCRX_CMDC_XACT_WR64 0x1940 +#define EARCRX_CMDC_XACT_RD0 0x1960 +#define EARCRX_CMDC_XACT_RD1 0x1964 +#define EARCRX_CMDC_XACT_RD2 0x1968 +#define EARCRX_CMDC_XACT_RD3 0x196c +#define EARCRX_CMDC_XACT_RD4 0x1970 +#define EARCRX_CMDC_XACT_RD5 0x1974 +#define EARCRX_CMDC_XACT_RD6 0x1978 +#define EARCRX_CMDC_XACT_RD7 0x197c +#define EARCRX_CMDC_XACT_RD8 0x1980 +#define EARCRX_CMDC_XACT_RD9 0x1984 +#define EARCRX_CMDC_XACT_RD10 0x1988 +#define EARCRX_CMDC_XACT_RD11 0x198c +#define EARCRX_CMDC_XACT_RD12 0x1990 +#define EARCRX_CMDC_XACT_RD13 0x1994 +#define EARCRX_CMDC_XACT_RD14 0x1998 +#define EARCRX_CMDC_XACT_RD15 0x199c +#define EARCRX_CMDC_XACT_RD16 0x19a0 
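/*
 * (Editor's note, not part of the patch: the XACT_WR0..WR64 and
 * XACT_RD0..RD64 mailbox banks are contiguous 32-bit registers, so an
 * equivalent indexed form would be
 *	EARCRX_CMDC_XACT_WR(n) = 0x1840 + (n) * 4, n = 0..64
 *	EARCRX_CMDC_XACT_RD(n) = 0x1960 + (n) * 4, n = 0..64
 * The patch spells out the flat per-register defines instead.)
 */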
+#define EARCRX_CMDC_XACT_RD17 0x19a4 +#define EARCRX_CMDC_XACT_RD18 0x19a8 +#define EARCRX_CMDC_XACT_RD19 0x19ac +#define EARCRX_CMDC_XACT_RD20 0x19b0 +#define EARCRX_CMDC_XACT_RD21 0x19b4 +#define EARCRX_CMDC_XACT_RD22 0x19b8 +#define EARCRX_CMDC_XACT_RD23 0x19bc +#define EARCRX_CMDC_XACT_RD24 0x19c0 +#define EARCRX_CMDC_XACT_RD25 0x19c4 +#define EARCRX_CMDC_XACT_RD26 0x19c8 +#define EARCRX_CMDC_XACT_RD27 0x19cc +#define EARCRX_CMDC_XACT_RD28 0x19d0 +#define EARCRX_CMDC_XACT_RD29 0x19d4 +#define EARCRX_CMDC_XACT_RD30 0x19d8 +#define EARCRX_CMDC_XACT_RD31 0x19dc +#define EARCRX_CMDC_XACT_RD32 0x19e0 +#define EARCRX_CMDC_XACT_RD33 0x19e4 +#define EARCRX_CMDC_XACT_RD34 0x19e8 +#define EARCRX_CMDC_XACT_RD35 0x19ec +#define EARCRX_CMDC_XACT_RD36 0x19f0 +#define EARCRX_CMDC_XACT_RD37 0x19f4 +#define EARCRX_CMDC_XACT_RD38 0x19f8 +#define EARCRX_CMDC_XACT_RD39 0x19fc +#define EARCRX_CMDC_XACT_RD40 0x1a00 +#define EARCRX_CMDC_XACT_RD41 0x1a04 +#define EARCRX_CMDC_XACT_RD42 0x1a08 +#define EARCRX_CMDC_XACT_RD43 0x1a0c +#define EARCRX_CMDC_XACT_RD44 0x1a10 +#define EARCRX_CMDC_XACT_RD45 0x1a14 +#define EARCRX_CMDC_XACT_RD46 0x1a18 +#define EARCRX_CMDC_XACT_RD47 0x1a1c +#define EARCRX_CMDC_XACT_RD48 0x1a20 +#define EARCRX_CMDC_XACT_RD49 0x1a24 +#define EARCRX_CMDC_XACT_RD50 0x1a28 +#define EARCRX_CMDC_XACT_RD51 0x1a2c +#define EARCRX_CMDC_XACT_RD52 0x1a30 +#define EARCRX_CMDC_XACT_RD53 0x1a34 +#define EARCRX_CMDC_XACT_RD54 0x1a38 +#define EARCRX_CMDC_XACT_RD55 0x1a3c +#define EARCRX_CMDC_XACT_RD56 0x1a40 +#define EARCRX_CMDC_XACT_RD57 0x1a44 +#define EARCRX_CMDC_XACT_RD58 0x1a48 +#define EARCRX_CMDC_XACT_RD59 0x1a4c +#define EARCRX_CMDC_XACT_RD60 0x1a50 +#define EARCRX_CMDC_XACT_RD61 0x1a54 +#define EARCRX_CMDC_XACT_RD62 0x1a58 +#define EARCRX_CMDC_XACT_RD63 0x1a5c +#define EARCRX_CMDC_XACT_RD64 0x1a60 +#define EARCRX_CMDC_SYNC_CONFIG 0x1b00 +/* eARC RX DMAC Registers */ +#define EARCRX_DMAC_PHY_CONTROL 0x1c00 +#define EARCRX_DMAC_CONFIG 0x1c08 +#define EARCRX_DMAC_CONTROL0 0x1c0c +#define EARCRX_DMAC_AUDIO_EN BIT(1) +#define EARCRX_DMAC_EN BIT(0) +#define EARCRX_DMAC_CONTROL1 0x1c10 +#define EARCRX_DMAC_STATUS 0x1c14 +#define EARCRX_DMAC_CHSTATUS0 0x1c18 +#define EARCRX_DMAC_CHSTATUS1 0x1c1c +#define EARCRX_DMAC_CHSTATUS2 0x1c20 +#define EARCRX_DMAC_CHSTATUS3 0x1c24 +#define EARCRX_DMAC_CHSTATUS4 0x1c28 +#define EARCRX_DMAC_CHSTATUS5 0x1c2c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC0 0x1c30 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC1 0x1c34 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC2 0x1c38 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC3 0x1c3c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC4 0x1c40 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC5 0x1c44 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC6 0x1c48 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC7 0x1c4c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC8 0x1c50 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC9 0x1c54 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC10 0x1c58 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC11 0x1c5c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT0 0x1c60 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT1 0x1c64 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT2 0x1c68 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT3 0x1c6c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT4 0x1c70 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT5 0x1c74 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT6 0x1c78 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT7 0x1c7c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT8 0x1c80 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT9 0x1c84 +#define 
EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT10 0x1c88 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT11 0x1c8c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT0 0x1c90 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT1 0x1c94 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT2 0x1c98 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT3 0x1c9c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT4 0x1ca0 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT5 0x1ca4 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT6 0x1ca8 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT7 0x1cac +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT8 0x1cb0 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT9 0x1cb4 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT10 0x1cb8 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT11 0x1cbc +#define EARCRX_DMAC_USRDATA_MSG_GENERIC0 0x1cc0 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC1 0x1cc4 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC2 0x1cc8 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC3 0x1ccc +#define EARCRX_DMAC_USRDATA_MSG_GENERIC4 0x1cd0 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC5 0x1cd4 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC6 0x1cd8 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC7 0x1cdc +#define EARCRX_DMAC_USRDATA_MSG_GENERIC8 0x1ce0 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC9 0x1ce4 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC10 0x1ce8 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC11 0x1cec +#define EARCRX_DMAC_USRDATA_MSG_GENERIC12 0x1cf0 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC13 0x1cf4 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC14 0x1cf8 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC15 0x1cfc +#define EARCRX_DMAC_USRDATA_MSG_GENERIC16 0x1d00 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC17 0x1d04 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC18 0x1d08 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC19 0x1d0c +#define EARCRX_DMAC_USRDATA_MSG_GENERIC20 0x1d10 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC21 0x1d14 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC22 0x1d18 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC23 0x1d1c +#define EARCRX_DMAC_USRDATA_MSG_GENERIC24 0x1d20 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC25 0x1d24 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC26 0x1d28 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC27 0x1d2c +#define EARCRX_DMAC_USRDATA_MSG_GENERIC28 0x1d30 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC29 0x1d34 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC30 0x1d38 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC31 0x1d3c +#define EARCRX_DMAC_USRDATA_MSG_GENERIC32 0x1d40 +#define EARCRX_DMAC_CHSTATUS_STREAMER0 0x1d44 +#define EARCRX_DMAC_CHSTATUS_STREAMER1 0x1d48 +#define EARCRX_DMAC_CHSTATUS_STREAMER2 0x1d4c +#define EARCRX_DMAC_CHSTATUS_STREAMER3 0x1d50 +#define EARCRX_DMAC_CHSTATUS_STREAMER4 0x1d54 +#define EARCRX_DMAC_CHSTATUS_STREAMER5 0x1d58 +#define EARCRX_DMAC_CHSTATUS_STREAMER6 0x1d5c +#define EARCRX_DMAC_CHSTATUS_STREAMER7 0x1d60 +#define EARCRX_DMAC_CHSTATUS_STREAMER8 0x1d64 +#define EARCRX_DMAC_CHSTATUS_STREAMER9 0x1d68 +#define EARCRX_DMAC_CHSTATUS_STREAMER10 0x1d6c +#define EARCRX_DMAC_CHSTATUS_STREAMER11 0x1d70 +#define EARCRX_DMAC_CHSTATUS_STREAMER12 0x1d74 +#define EARCRX_DMAC_CHSTATUS_STREAMER13 0x1d78 +#define EARCRX_DMAC_CHSTATUS_STREAMER14 0x1d7c +#define EARCRX_DMAC_USRDATA_STREAMER0 0x1d80 +/* Main Unit Interrupt Registers */ +#define MAIN_INTVEC_INDEX 0x3000 +#define MAINUNIT_0_INT_STATUS 0x3010 +#define MAINUNIT_0_INT_MASK_N 0x3014 +#define MAINUNIT_0_INT_CLEAR 0x3018 +#define MAINUNIT_0_INT_FORCE 0x301c +#define MAINUNIT_1_INT_STATUS 0x3020 +#define FLT_EXIT_TO_LTSL_IRQ BIT(22) +#define FLT_EXIT_TO_LTS4_IRQ BIT(21) +#define FLT_EXIT_TO_LTSP_IRQ BIT(20) 
+#define SCDC_NACK_RCVD_IRQ BIT(12) +#define SCDC_RR_REPLY_STOP_IRQ BIT(11) +#define SCDC_UPD_FLAGS_CLR_IRQ BIT(10) +#define SCDC_UPD_FLAGS_CHG_IRQ BIT(9) +#define SCDC_UPD_FLAGS_RD_IRQ BIT(8) +#define I2CM_NACK_RCVD_IRQ BIT(2) +#define I2CM_READ_REQUEST_IRQ BIT(1) +#define I2CM_OP_DONE_IRQ BIT(0) +#define MAINUNIT_1_INT_MASK_N 0x3024 +#define I2CM_NACK_RCVD_MASK_N BIT(2) +#define I2CM_READ_REQUEST_MASK_N BIT(1) +#define I2CM_OP_DONE_MASK_N BIT(0) +#define MAINUNIT_1_INT_CLEAR 0x3028 +#define I2CM_NACK_RCVD_CLEAR BIT(2) +#define I2CM_READ_REQUEST_CLEAR BIT(1) +#define I2CM_OP_DONE_CLEAR BIT(0) +#define MAINUNIT_1_INT_FORCE 0x302c +/* AVPUNIT Interrupt Registers */ +#define AVP_INTVEC_INDEX 0x3800 +#define AVP_0_INT_STATUS 0x3810 +#define AVP_0_INT_MASK_N 0x3814 +#define AVP_0_INT_CLEAR 0x3818 +#define AVP_0_INT_FORCE 0x381c +#define AVP_1_INT_STATUS 0x3820 +#define AVP_1_INT_MASK_N 0x3824 +#define HDCP14_AUTH_CHG_MASK_N BIT(6) +#define AVP_1_INT_CLEAR 0x3828 +#define AVP_1_INT_FORCE 0x382c +#define AVP_2_INT_STATUS 0x3830 +#define AVP_2_INT_MASK_N 0x3834 +#define AVP_2_INT_CLEAR 0x3838 +#define AVP_2_INT_FORCE 0x383c +#define AVP_3_INT_STATUS 0x3840 +#define AVP_3_INT_MASK_N 0x3844 +#define AVP_3_INT_CLEAR 0x3848 +#define AVP_3_INT_FORCE 0x384c +#define AVP_4_INT_STATUS 0x3850 +#define AVP_4_INT_MASK_N 0x3854 +#define AVP_4_INT_CLEAR 0x3858 +#define AVP_4_INT_FORCE 0x385c +#define AVP_5_INT_STATUS 0x3860 +#define AVP_5_INT_MASK_N 0x3864 +#define AVP_5_INT_CLEAR 0x3868 +#define AVP_5_INT_FORCE 0x386c +#define AVP_6_INT_STATUS 0x3870 +#define AVP_6_INT_MASK_N 0x3874 +#define AVP_6_INT_CLEAR 0x3878 +#define AVP_6_INT_FORCE 0x387c +/* CEC Interrupt Registers */ +#define CEC_INT_STATUS 0x4000 +#define CEC_INT_MASK_N 0x4004 +#define CEC_INT_CLEAR 0x4008 +#define CEC_INT_FORCE 0x400c +/* eARC RX Interrupt Registers */ +#define EARCRX_INTVEC_INDEX 0x4800 +#define EARCRX_0_INT_STATUS 0x4810 +#define EARCRX_CMDC_DISCOVERY_TIMEOUT_IRQ BIT(9) +#define EARCRX_CMDC_DISCOVERY_DONE_IRQ BIT(8) +#define EARCRX_0_INT_MASK_N 0x4814 +#define EARCRX_0_INT_CLEAR 0x4818 +#define EARCRX_0_INT_FORCE 0x481c +#define EARCRX_1_INT_STATUS 0x4820 +#define EARCRX_1_INT_MASK_N 0x4824 +#define EARCRX_1_INT_CLEAR 0x4828 +#define EARCRX_1_INT_FORCE 0x482c + +#endif /* __DW_HDMI_QP_H__ */ diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 0031f3c54882..996733ed2c00 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -3503,6 +3503,9 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, hdmi->bridge.of_node = pdev->dev.of_node; hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + if (hdmi->version >= 0x200a) + hdmi->bridge.ycbcr_420_allowed = plat_data->ycbcr_420_allowed; + memset(&pdevinfo, 0, sizeof(pdevinfo)); pdevinfo.parent = dev; pdevinfo.id = PLATFORM_DEVID_AUTO; diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 159c95b26d33..7275e66faefc 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1707,13 +1707,20 @@ static void tc_bridge_mode_set(struct drm_bridge *bridge, { struct tc_data *tc = bridge_to_tc(bridge); - drm_mode_copy(&tc->mode, mode); + drm_mode_copy(&tc->mode, adj); } static const struct drm_edid *tc_edid_read(struct drm_bridge *bridge, struct drm_connector *connector) { struct tc_data *tc = bridge_to_tc(bridge); + int ret; + + ret = tc_get_display_props(tc); + if (ret < 0) { + dev_err(tc->dev, "failed to read display props: 
%d\n", ret); + return 0; + } return drm_edid_read_ddc(connector, &tc->aux.ddc); } @@ -2405,6 +2412,7 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc) if (tc->pre_emphasis[0] < 0 || tc->pre_emphasis[0] > 2 || tc->pre_emphasis[1] < 0 || tc->pre_emphasis[1] > 2) { dev_err(dev, "Incorrect Pre-Emphasis setting, use either 0=0dB 1=3.5dB 2=6dB\n"); + of_node_put(node); return -EINVAL; } } diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c index 0e8813278a2f..2cb748bbefcd 100644 --- a/drivers/gpu/drm/bridge/tc358768.c +++ b/drivers/gpu/drm/bridge/tc358768.c @@ -125,6 +125,9 @@ #define TC358768_DSI_CONFW_MODE_CLR (6 << 29) #define TC358768_DSI_CONFW_ADDR_DSI_CONTROL (0x3 << 24) +/* TC358768_DSICMD_TX (0x0600) register */ +#define TC358768_DSI_CMDTX_DC_START BIT(0) + static const char * const tc358768_supplies[] = { "vddc", "vddmipi", "vddio" }; @@ -229,6 +232,21 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask, tc358768_write(priv, reg, tmp); } +static void tc358768_dsicmd_tx(struct tc358768_priv *priv) +{ + u32 val; + + /* start transfer */ + tc358768_write(priv, TC358768_DSICMD_TX, TC358768_DSI_CMDTX_DC_START); + if (priv->error) + return; + + /* wait transfer completion */ + priv->error = regmap_read_poll_timeout(priv->regmap, TC358768_DSICMD_TX, val, + (val & TC358768_DSI_CMDTX_DC_START) == 0, + 100, 100000); +} + static int tc358768_sw_reset(struct tc358768_priv *priv) { /* Assert Reset */ @@ -443,7 +461,9 @@ static int tc358768_dsi_host_attach(struct mipi_dsi_host *host, ret = -EINVAL; ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0); if (ep) { - ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines); + ret = of_property_read_u32(ep, "bus-width", &priv->pd_lines); + if (ret) + ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines); of_node_put(ep); } @@ -516,8 +536,7 @@ static ssize_t tc358768_dsi_host_transfer(struct mipi_dsi_host *host, } } - /* start transfer */ - tc358768_write(priv, TC358768_DSICMD_TX, 1); + tc358768_dsicmd_tx(priv); ret = tc358768_clear_error(priv); if (ret) diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c index 674efc489e3a..bba10cf9b4f9 100644 --- a/drivers/gpu/drm/bridge/thc63lvd1024.c +++ b/drivers/gpu/drm/bridge/thc63lvd1024.c @@ -230,7 +230,7 @@ MODULE_DEVICE_TABLE(of, thc63_match); static struct platform_driver thc63_driver = { .probe = thc63_probe, - .remove_new = thc63_remove, + .remove = thc63_remove, .driver = { .name = "thc63lvd1024", .of_match_table = thc63_match, diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c index 6b559e071301..a0a1b5dd794e 100644 --- a/drivers/gpu/drm/bridge/ti-dlpc3433.c +++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c @@ -94,7 +94,7 @@ static const struct regmap_access_table dlpc_volatile_table = { .n_yes_ranges = ARRAY_SIZE(dlpc_volatile_ranges), }; -static struct regmap_config dlpc_regmap_config = { +static const struct regmap_config dlpc_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = WR_DSI_PORT_EN, diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index b1b1e4d5a24a..107a2c0b96c9 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -406,7 +406,7 @@ MODULE_DEVICE_TABLE(of, tfp410_match); static struct platform_driver tfp410_platform_driver = { .probe = tfp410_probe, - .remove_new = tfp410_remove, + .remove = tfp410_remove, .driver = { .name = "tfp410-bridge", 
.of_match_table = tfp410_match, diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c index f9fb35683a27..47b74cb25b14 100644 --- a/drivers/gpu/drm/bridge/ti-tpd12s015.c +++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c @@ -195,7 +195,7 @@ MODULE_DEVICE_TABLE(of, tpd12s015_of_match); static struct platform_driver tpd12s015_driver = { .probe = tpd12s015_probe, - .remove_new = tpd12s015_remove, + .remove = tpd12s015_remove, .driver = { .name = "tpd12s015", .of_match_table = tpd12s015_of_match, diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config index 66e70ced796f..a8fca079921b 100644 --- a/drivers/gpu/drm/ci/arm64.config +++ b/drivers/gpu/drm/ci/arm64.config @@ -90,7 +90,12 @@ CONFIG_QCOM_GPI_DMA=y CONFIG_USB_ONBOARD_DEV=y CONFIG_NVMEM_QCOM_QFPROM=y CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=y - +CONFIG_REGULATOR_QCOM_REFGEN=y +CONFIG_TYPEC_MUX_FSA4480=y +CONFIG_QCOM_PMIC_GLINK=y +CONFIG_UCSI_PMIC_GLINK=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y # db410c ethernet CONFIG_USB_RTL8152=y diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh index 5a3bdcffae32..139b81db6312 100644 --- a/drivers/gpu/drm/ci/build.sh +++ b/drivers/gpu/drm/ci/build.sh @@ -30,6 +30,7 @@ if [[ "$KERNEL_ARCH" = "arm64" ]]; then DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dtb" DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r5.dtb" DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dtb" + DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sm8350-hdk.dtb" elif [[ "$KERNEL_ARCH" = "arm" ]]; then GCC_ARCH="arm-linux-gnueabihf" DEBIAN_ARCH="armhf" diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml index 09d8447840e9..f0ef60c8f56d 100644 --- a/drivers/gpu/drm/ci/test.yml +++ b/drivers/gpu/drm/ci/test.yml @@ -162,6 +162,22 @@ msm:sdm845: script: - ./install/bare-metal/cros-servo.sh +msm:sm8350-hdk: + extends: + - .lava-igt:arm64 + stage: msm + parallel: 4 + variables: + BOOT_METHOD: fastboot + DEVICE_TYPE: sm8350-hdk + DRIVER_NAME: msm + DTB: ${DEVICE_TYPE} + FARM: collabora + GPU_VERSION: ${DEVICE_TYPE} + KERNEL_IMAGE_NAME: "Image.gz" + KERNEL_IMAGE_TYPE: "" + RUNNER_TAG: mesa-ci-x86-64-lava-sm8350-hdk + .rockchip-device: variables: DTB: ${DEVICE_TYPE} @@ -286,6 +302,15 @@ i915:tgl: GPU_VERSION: tgl RUNNER_TAG: mesa-ci-x86-64-lava-acer-cp514-2h-1130g7-volteer +i915:jsl: + extends: + - .i915 + parallel: 4 + variables: + DEVICE_TYPE: acer-cb317-1h-c3z6-dedede + GPU_VERSION: jsl + RUNNER_TAG: mesa-ci-x86-64-lava-acer-cb317-1h-c3z6-dedede + .amdgpu: extends: - .lava-igt:x86_64 diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt new file mode 100644 index 000000000000..ed9f7b576843 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt @@ -0,0 +1,51 @@ +core_setmaster@master-drop-set-user,Fail +i915_module_load@load,Fail +i915_module_load@reload,Fail +i915_module_load@reload-no-display,Fail +i915_module_load@resize-bar,Fail +i915_pm_rpm@gem-execbuf-stress,Timeout +i915_pm_rpm@module-reload,Fail +kms_flip@plain-flip-fb-recreate,Fail +kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail +kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail +kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail +kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail +kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail 
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail +kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail +kms_lease@lease-uevent,Fail +kms_pm_rpm@legacy-planes,Timeout +kms_pm_rpm@legacy-planes-dpms,Timeout +kms_pm_rpm@modeset-stress-extra-wait,Timeout +kms_pm_rpm@universal-planes,Timeout +kms_pm_rpm@universal-planes-dpms,Timeout +kms_rotation_crc@multiplane-rotation,Fail +kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail +kms_rotation_crc@multiplane-rotation-cropping-top,Fail +perf@i915-ref-count,Fail +perf_pmu@busy-accuracy-50,Fail +perf_pmu@module-unload,Fail +perf_pmu@most-busy-idle-check-all,Fail +perf_pmu@rc6,Crash +sysfs_heartbeat_interval@long,Timeout +sysfs_heartbeat_interval@off,Timeout +sysfs_preempt_timeout@off,Timeout +sysfs_timeslice_duration@off,Timeout +xe_module_load@force-load,Fail +xe_module_load@load,Fail +xe_module_load@many-reload,Fail +xe_module_load@reload,Fail +xe_module_load@reload-no-display,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt new file mode 100644 index 000000000000..5c3ef4486b9d --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt @@ -0,0 +1,13 @@ +# Board Name: acer-cb317-1h-c3z6-dedede +# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12475 +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.12.0-rc1 +kms_flip@flip-vs-panning-interruptible + +# Board Name: acer-cb317-1h-c3z6-dedede +# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12476 +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.12.0-rc1 +kms_universal_plane@cursor-fb-leak diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt new file mode 100644 index 000000000000..1a3d87c0ca6e --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt @@ -0,0 +1,20 @@ +# Suspend to RAM seems to be broken on this machine +.*suspend.* + +# Skip driver specific tests +^amdgpu.* +^msm.* +nouveau_.* +^panfrost.* +^v3d.* +^vc4.* +^vmwgfx* + +# GEM tests takes ~1000 hours, so skip it +gem_.* + +# trap_err +i915_pm_rc6_residency.* + +# Hangs the machine and timeout occurs +i915_pm_rpm@system-hibernate* diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt new file mode 100644 index 000000000000..4892c0c70a6d --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt @@ -0,0 +1,15 @@ +kms_3d,Fail +kms_cursor_legacy@forked-bo,Fail +kms_cursor_legacy@forked-move,Fail 
+kms_cursor_legacy@single-bo,Fail +kms_cursor_legacy@single-move,Fail +kms_cursor_legacy@torture-bo,Fail +kms_cursor_legacy@torture-move,Fail +kms_hdmi_inject@inject-4k,Fail +kms_lease@lease-uevent,Fail +kms_plane_alpha_blend@alpha-7efc,Fail +kms_plane_alpha_blend@alpha-basic,Fail +kms_plane_alpha_blend@alpha-opaque-fb,Fail +kms_plane_alpha_blend@alpha-transparent-fb,Fail +kms_plane_alpha_blend@constant-alpha-max,Fail +msm/msm_recovery@gpu-fault-parallel,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt new file mode 100644 index 000000000000..c1859d9b165f --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt @@ -0,0 +1,6 @@ +# Board Name: sm8350-hdk +# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/65 +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.12.0-rc1 +msm/msm_recovery@gpu-fault diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt new file mode 100644 index 000000000000..329770c520d9 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt @@ -0,0 +1,211 @@ +# Skip driver specific tests +^amdgpu.* +nouveau_.* +^panfrost.* +^v3d.* +^vc4.* +^vmwgfx* + +# Skip intel specific tests +gem_.* +i915_.* +tools_test.* + +# Currently fails and causes coverage loss for other tests +# since core_getversion also fails. +core_hotunplug.* + +# Kernel panic +msm/msm_mapping@ring +# DEBUG - Begin test msm/msm_mapping@ring +# [ 200.874157] [IGT] msm_mapping: executing +# [ 200.880236] [IGT] msm_mapping: starting subtest ring +# [ 200.895243] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=PERMISSION source=CP (0,0,0,1) +# [ 200.906885] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.917625] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.928353] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.939084] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.949815] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.950227] platform 3d6a000.gmu: [drm:a6xx_hfi_send_msg.constprop.0] *ERROR* Message HFI_H2F_MSG_GX_BW_PERF_VOTE id 25 timed out waiting for response +# [ 200.960467] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.960500] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.995966] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 201.006702] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 204.213387] platform 3d6a000.gmu: GMU watchdog expired +# [ 205.909103] adreno_fault_handler: 224274 callbacks suppressed +# [ 205.909108] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.925794] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.936529] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.947263] *** gpu fault: ttbr0=00000001160d6000 
iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.957997] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.968731] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.979465] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.990199] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 206.000932] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 206.011666] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.925090] adreno_fault_handler: 224511 callbacks suppressed +# [ 210.925096] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.941781] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.952517] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.963250] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.973985] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.984719] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.995452] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 211.006186] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 211.016921] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 211.027655] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.937100] adreno_fault_handler: 223760 callbacks suppressed +# [ 215.937106] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.953824] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.964573] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.975321] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.986067] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.996815] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 216.007563] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 216.018310] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 216.029057] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 216.039805] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.945182] adreno_fault_handler: 222822 callbacks suppressed +# [ 220.945188] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.961897] *** gpu fault: 
ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.972645] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.983392] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.994140] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.004889] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.015636] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.026383] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.037130] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.047879] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 225.953179] adreno_fault_handler: 223373 callbacks suppressed +# [ 225.953184] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 225.969883] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 225.980617] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 225.991350] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.002084] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.012818] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.023551] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.034285] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.045019] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.055753] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 228.001087] rcu: INFO: rcu_preempt detected stalls on CPUs/tasks: +# [ 228.007412] rcu: 0-....: (524 ticks this GP) idle=4ffc/1/0x4000000000000000 softirq=9367/9368 fqs=29 +# [ 228.017097] rcu: (detected by 1, t=6504 jiffies, g=29837, q=6 ncpus=8) +# [ 228.023959] Sending NMI from CPU 1 to CPUs 0: +# [ 228.161164] watchdog: BUG: soft lockup - CPU#0 stuck for 26s! [gpu-worker:150] +# [ 228.173169] Modules linked in: +# [ 228.176361] irq event stamp: 2809595 +# [ 228.180083] hardirqs last enabled at (2809594): [<ffffd3bc52cb91ac>] exit_to_kernel_mode+0x38/0x130 +# [ 228.189547] hardirqs last disabled at (2809595): [<ffffd3bc52cb92c8>] el1_interrupt+0x24/0x64 +# [ 228.198377] softirqs last enabled at (1669060): [<ffffd3bc51936f98>] handle_softirqs+0x4a4/0x4bc +# [ 228.207565] softirqs last disabled at (1669063): [<ffffd3bc518905a4>] __do_softirq+0x14/0x20 +# [ 228.216316] CPU: 0 UID: 0 PID: 150 Comm: gpu-worker Not tainted 6.12.0-rc1-g685d530dc83a #1 +# [ 228.224966] Hardware name: Qualcomm Technologies, Inc. 
SM8350 HDK (DT) +# [ 228.231730] pstate: 00400005 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +# [ 228.238948] pc : tcp_fastretrans_alert+0x0/0x884 +# [ 228.243751] lr : tcp_ack+0x9d4/0x1238 +# [ 228.247562] sp : ffff8000800036d0 +# [ 228.251011] x29: ffff8000800036d0 x28: 000000000000000c x27: 0000000000000001 +# [ 228.258421] x26: ffff704683cd8000 x25: 0000000000000403 x24: ffff70468b7e7c00 +# [ 228.265829] x23: 0000000000000000 x22: 0000000000000004 x21: 000000000000140f +# [ 228.273237] x20: 00000000f1de79f7 x19: 00000000f1de7a5f x18: 0000000000000001 +# [ 228.280644] x17: 00000000302d6762 x16: 632d6b64682d3035 x15: ffff704683c39000 +# [ 228.288051] x14: 00000000000e2000 x13: ffff704683df6000 x12: 0000000000000000 +# [ 228.295458] x11: 00000000000000a0 x10: 0000000000000000 x9 : ffffd3bc551a9a20 +# [ 228.302865] x8 : ffff800080003640 x7 : 0000000000040faa x6 : 00000000ffff9634 +# [ 228.310271] x5 : 00000000000005a8 x4 : ffff800080003788 x3 : ffff80008000377c +# [ 228.317679] x2 : 0000000000000000 x1 : 00000000f1de79f7 x0 : ffff704683cd8000 +# [ 228.325087] Call trace: +# [ 228.327640] tcp_fastretrans_alert+0x0/0x884 +# [ 228.332082] tcp_rcv_established+0x7c4/0x8bc +# [ 228.336523] tcp_v4_do_rcv+0x244/0x31c +# [ 228.340429] tcp_v4_rcv+0xcc4/0x1084 +# [ 228.344155] ip_protocol_deliver_rcu+0x64/0x218 +# [ 228.348862] ip_local_deliver_finish+0xb8/0x1ac +# [ 228.353566] ip_local_deliver+0x84/0x254 +# [ 228.357651] ip_sublist_rcv_finish+0x84/0xb8 +# [ 228.362092] ip_sublist_rcv+0x11c/0x2f0 +# [ 228.366081] ip_list_rcv+0xfc/0x190 +# [ 228.369711] __netif_receive_skb_list_core+0x174/0x208 +# [ 228.375050] netif_receive_skb_list_internal+0x204/0x3ac +# [ 228.380564] napi_complete_done+0x64/0x1d0 +# [ 228.384826] lan78xx_poll+0x71c/0x9cc +# [ 228.388638] __napi_poll.constprop.0+0x3c/0x254 +# [ 228.393341] net_rx_action+0x164/0x2d4 +# [ 228.397244] handle_softirqs+0x128/0x4bc +# [ 228.401329] __do_softirq+0x14/0x20 +# [ 228.404958] ____do_softirq+0x10/0x1c +# [ 228.408769] call_on_irq_stack+0x24/0x4c +# [ 228.412854] do_softirq_own_stack+0x1c/0x28 +# [ 228.417199] __irq_exit_rcu+0x124/0x164 +# [ 228.421188] irq_exit_rcu+0x10/0x38 +# [ 228.424819] el1_interrupt+0x38/0x64 +# [ 228.428546] el1h_64_irq_handler+0x18/0x24 +# [ 228.432807] el1h_64_irq+0x64/0x68 +# [ 228.436354] lock_acquire+0x214/0x32c +# [ 228.440166] __mutex_lock+0x98/0x3d0 +# [ 228.443893] mutex_lock_nested+0x24/0x30 +# [ 228.447978] fault_worker+0x58/0x184 +# [ 228.451704] kthread_worker_fn+0xf4/0x320 +# [ 228.455873] kthread+0x114/0x118 +# [ 228.459243] ret_from_fork+0x10/0x20 +# [ 228.462970] Kernel panic - not syncing: softlockup: hung tasks +# [ 228.469018] CPU: 0 UID: 0 PID: 150 Comm: gpu-worker Tainted: G L 6.12.0-rc1-g685d530dc83a #1 +# [ 228.479190] Tainted: [L]=SOFTLOCKUP +# [ 228.482815] Hardware name: Qualcomm Technologies, Inc. 
SM8350 HDK (DT) +# [ 228.489574] Call trace: +# [ 228.492125] dump_backtrace+0x98/0xf0 +# [ 228.495931] show_stack+0x18/0x24 +# [ 228.499380] dump_stack_lvl+0x38/0xd0 +# [ 228.503189] dump_stack+0x18/0x24 +# [ 228.506639] panic+0x3bc/0x41c +# [ 228.509826] watchdog_timer_fn+0x254/0x2e4 +# [ 228.514087] __hrtimer_run_queues+0x3b0/0x40c +# [ 228.518612] hrtimer_interrupt+0xe8/0x248 +# [ 228.522777] arch_timer_handler_virt+0x2c/0x44 +# [ 228.527399] handle_percpu_devid_irq+0xa8/0x2c4 +# [ 228.532103] generic_handle_domain_irq+0x2c/0x44 +# [ 228.536902] gic_handle_irq+0x4c/0x11c +# [ 228.540802] do_interrupt_handler+0x50/0x84 +# [ 228.545146] el1_interrupt+0x34/0x64 +# [ 228.548870] el1h_64_irq_handler+0x18/0x24 +# [ 228.553128] el1h_64_irq+0x64/0x68 +# [ 228.556672] tcp_fastretrans_alert+0x0/0x884 +# [ 228.561110] tcp_rcv_established+0x7c4/0x8bc +# [ 228.565548] tcp_v4_do_rcv+0x244/0x31c +# [ 228.569449] tcp_v4_rcv+0xcc4/0x1084 +# [ 228.573171] ip_protocol_deliver_rcu+0x64/0x218 +# [ 228.577873] ip_local_deliver_finish+0xb8/0x1ac +# [ 228.582574] ip_local_deliver+0x84/0x254 +# [ 228.586655] ip_sublist_rcv_finish+0x84/0xb8 +# [ 228.591092] ip_sublist_rcv+0x11c/0x2f0 +# [ 228.595079] ip_list_rcv+0xfc/0x190 +# [ 228.598706] __netif_receive_skb_list_core+0x174/0x208 +# [ 228.604039] netif_receive_skb_list_internal+0x204/0x3ac +# [ 228.609549] napi_complete_done+0x64/0x1d0 +# [ 228.613808] lan78xx_poll+0x71c/0x9cc +# [ 228.617614] __napi_poll.constprop.0+0x3c/0x254 +# [ 228.622314] net_rx_action+0x164/0x2d4 +# [ 228.626214] handle_softirqs+0x128/0x4bc +# [ 228.630297] __do_softirq+0x14/0x20 +# [ 228.633923] ____do_softirq+0x10/0x1c +# [ 228.637729] call_on_irq_stack+0x24/0x4c +# [ 228.641811] do_softirq_own_stack+0x1c/0x28 +# [ 228.646152] __irq_exit_rcu+0x124/0x164 +# [ 228.650139] irq_exit_rcu+0x10/0x38 +# [ 228.653768] el1_interrupt+0x38/0x64 +# [ 228.657491] el1h_64_irq_handler+0x18/0x24 +# [ 228.661750] el1h_64_irq+0x64/0x68 +# [ 228.665293] lock_acquire+0x214/0x32c +# [ 228.669098] __mutex_lock+0x98/0x3d0 +# [ 228.672821] mutex_lock_nested+0x24/0x30 +# [ 228.676903] fault_worker+0x58/0x184 +# [ 228.680626] kthread_worker_fn+0xf4/0x320 +# [ 228.684790] kthread+0x114/0x118 +# [ 228.688156] ret_from_fork+0x10/0x20 +# [ 228.691882] SMP: stopping secondary CPUs +# [ 229.736843] SMP: failed to stop secondary CPUs 1,4 +# [ 229.741827] Kernel Offset: 0x53bbd1880000 from 0xffff800080000000 +# [ 229.748159] PHYS_OFFSET: 0xfff08fba80000000 +# [ 229.752499] CPU features: 0x18,00000017,00200928,4200720b +# [ 229.758095] Memory Limit: none +# [ 229.761291] ---[ end Kernel panic - not syncing: softlockup: hung tasks ]--- diff --git a/drivers/gpu/drm/ci/xfails/requirements.txt b/drivers/gpu/drm/ci/xfails/requirements.txt deleted file mode 100644 index 5e6d48d98e4e..000000000000 --- a/drivers/gpu/drm/ci/xfails/requirements.txt +++ /dev/null @@ -1,17 +0,0 @@ -git+https://gitlab.freedesktop.org/gfx-ci/ci-collate@09e7142715c16f54344ddf97013331ba063b162b -termcolor==2.3.0 - -# ci-collate dependencies -certifi==2023.7.22 -charset-normalizer==3.2.0 -idna==3.4 -pip==23.3 -python-gitlab==3.15.0 -requests==2.31.0 -requests-toolbelt==1.0.0 -ruamel.yaml==0.17.32 -ruamel.yaml.clib==0.2.7 -setuptools==70.0.0 -tenacity==8.2.3 -urllib3==2.0.7 -wheel==0.41.1 diff --git a/drivers/gpu/drm/ci/xfails/update-xfails.py b/drivers/gpu/drm/ci/xfails/update-xfails.py deleted file mode 100755 index a446e98d72a1..000000000000 --- a/drivers/gpu/drm/ci/xfails/update-xfails.py +++ /dev/null @@ -1,204 +0,0 @@ -#!/usr/bin/env python3 - 
-import argparse -from collections import defaultdict -import difflib -import os -import re -from glcollate import Collate -from termcolor import colored -from urllib.parse import urlparse - - -def get_canonical_name(job_name): - return re.split(r" \d+/\d+", job_name)[0] - - -def get_xfails_file_path(job_name, suffix): - canonical_name = get_canonical_name(job_name) - name = canonical_name.replace(":", "-") - script_dir = os.path.dirname(os.path.abspath(__file__)) - return os.path.join(script_dir, f"{name}-{suffix}.txt") - - -def get_unit_test_name_and_results(unit_test): - if "Artifact results/failures.csv not found" in unit_test or '' == unit_test: - return None, None - unit_test_name, unit_test_result = unit_test.strip().split(",") - return unit_test_name, unit_test_result - - -def read_file(file_path): - try: - with open(file_path, "r") as file: - f = file.readlines() - if len(f): - f[-1] = f[-1].strip() + "\n" - return f - except FileNotFoundError: - return [] - - -def save_file(content, file_path): - # delete file is content is empty - if not content or not any(content): - if os.path.exists(file_path): - os.remove(file_path) - return - - with open(file_path, "w") as file: - file.writelines(content) - - -def is_test_present_on_file(file_content, unit_test_name): - return any(unit_test_name in line for line in file_content) - - -def is_unit_test_present_in_other_jobs(unit_test, job_ids): - return all(unit_test in job_ids[job_id] for job_id in job_ids) - - -def remove_unit_test_if_present(lines, unit_test_name): - if not is_test_present_on_file(lines, unit_test_name): - return - lines[:] = [line for line in lines if unit_test_name not in line] - - -def add_unit_test_if_not_present(lines, unit_test_name, file_name): - # core_getversion is mandatory - if "core_getversion" in unit_test_name: - print("WARNING: core_getversion should pass, not adding it to", os.path.basename(file_name)) - elif all(unit_test_name not in line for line in lines): - lines.append(unit_test_name + "\n") - - -def update_unit_test_result_in_fails_txt(fails_txt, unit_test): - unit_test_name, unit_test_result = get_unit_test_name_and_results(unit_test) - for i, line in enumerate(fails_txt): - if unit_test_name in line: - _, current_result = get_unit_test_name_and_results(line) - fails_txt[i] = unit_test + "\n" - return - - -def add_unit_test_or_update_result_to_fails_if_present(fails_txt, unit_test, fails_txt_path): - unit_test_name, _ = get_unit_test_name_and_results(unit_test) - if not is_test_present_on_file(fails_txt, unit_test_name): - add_unit_test_if_not_present(fails_txt, unit_test, fails_txt_path) - # if it is present but not with the same result - elif not is_test_present_on_file(fails_txt, unit_test): - update_unit_test_result_in_fails_txt(fails_txt, unit_test) - - -def split_unit_test_from_collate(xfails): - for job_name in xfails.keys(): - for job_id in xfails[job_name].copy().keys(): - if "not found" in xfails[job_name][job_id].content_as_str: - del xfails[job_name][job_id] - continue - xfails[job_name][job_id] = xfails[job_name][job_id].content_as_str.splitlines() - - -def get_xfails_from_pipeline_url(pipeline_url): - parsed_url = urlparse(pipeline_url) - path_components = parsed_url.path.strip("/").split("/") - - namespace = path_components[0] - project = path_components[1] - pipeline_id = path_components[-1] - - print("Collating from:", namespace, project, pipeline_id) - xfails = ( - Collate(namespace=namespace, project=project) - .from_pipeline(pipeline_id) - .get_artifact("results/failures.csv") - ) 
- - split_unit_test_from_collate(xfails) - return xfails - - -def get_xfails_from_pipeline_urls(pipelines_urls): - xfails = defaultdict(dict) - - for url in pipelines_urls: - new_xfails = get_xfails_from_pipeline_url(url) - for key in new_xfails: - xfails[key].update(new_xfails[key]) - - return xfails - - -def print_diff(old_content, new_content, file_name): - diff = difflib.unified_diff(old_content, new_content, lineterm="", fromfile=file_name, tofile=file_name) - diff = [colored(line, "green") if line.startswith("+") else - colored(line, "red") if line.startswith("-") else line for line in diff] - print("\n".join(diff[:3])) - print("".join(diff[3:])) - - -def main(pipelines_urls, only_flakes): - xfails = get_xfails_from_pipeline_urls(pipelines_urls) - - for job_name in xfails.keys(): - fails_txt_path = get_xfails_file_path(job_name, "fails") - flakes_txt_path = get_xfails_file_path(job_name, "flakes") - - fails_txt = read_file(fails_txt_path) - flakes_txt = read_file(flakes_txt_path) - - fails_txt_original = fails_txt.copy() - flakes_txt_original = flakes_txt.copy() - - for job_id in xfails[job_name].keys(): - for unit_test in xfails[job_name][job_id]: - unit_test_name, unit_test_result = get_unit_test_name_and_results(unit_test) - - if not unit_test_name: - continue - - if only_flakes: - remove_unit_test_if_present(fails_txt, unit_test_name) - add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path) - continue - - # drop it from flakes if it is present to analyze it again - remove_unit_test_if_present(flakes_txt, unit_test_name) - - if unit_test_result == "UnexpectedPass": - remove_unit_test_if_present(fails_txt, unit_test_name) - # flake result - if not is_unit_test_present_in_other_jobs(unit_test, xfails[job_name]): - add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path) - continue - - # flake result - if not is_unit_test_present_in_other_jobs(unit_test, xfails[job_name]): - remove_unit_test_if_present(fails_txt, unit_test_name) - add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path) - continue - - # consistent result - add_unit_test_or_update_result_to_fails_if_present(fails_txt, unit_test, - fails_txt_path) - - fails_txt.sort() - flakes_txt.sort() - - if fails_txt != fails_txt_original: - save_file(fails_txt, fails_txt_path) - print_diff(fails_txt_original, fails_txt, os.path.basename(fails_txt_path)) - if flakes_txt != flakes_txt_original: - save_file(flakes_txt, flakes_txt_path) - print_diff(flakes_txt_original, flakes_txt, os.path.basename(flakes_txt_path)) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Update xfails from a given pipeline.") - parser.add_argument("pipeline_urls", nargs="+", type=str, help="URLs to the pipelines to analyze the failures.") - parser.add_argument("--only-flakes", action="store_true", help="Treat every detected failure as a flake, edit *-flakes.txt only.") - - args = parser.parse_args() - - main(args.pipeline_urls, args.only_flakes) - print("Done.") diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig index 0f07cf1483ff..6a4e892afcf8 100644 --- a/drivers/gpu/drm/display/Kconfig +++ b/drivers/gpu/drm/display/Kconfig @@ -3,7 +3,7 @@ config DRM_DISPLAY_DP_AUX_BUS tristate depends on DRM - depends on OF || COMPILE_TEST + depends on OF config DRM_DISPLAY_HELPER tristate diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index 3da5b8bf8259..320c297008aa 100644 --- 
a/drivers/gpu/drm/display/drm_bridge_connector.c +++ b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -397,11 +397,11 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, bridge_connector->encoder = encoder; /* - * TODO: Handle doublescan_allowed, stereo_allowed and - * ycbcr_420_allowed. + * TODO: Handle doublescan_allowed and stereo_allowed. */ connector = &bridge_connector->base; connector->interlace_allowed = true; + connector->ycbcr_420_allowed = true; /* * Initialise connector status handling. First locate the furthest @@ -414,6 +414,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, drm_for_each_bridge_in_chain(encoder, bridge) { if (!bridge->interlace_allowed) connector->interlace_allowed = false; + if (!bridge->ycbcr_420_allowed) + connector->ycbcr_420_allowed = false; if (bridge->ops & DRM_BRIDGE_OP_EDID) bridge_connector->bridge_edid = bridge; diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c index d810529ebfb6..ec7eac6b595f 100644 --- a/drivers/gpu/drm/display/drm_dp_aux_bus.c +++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c @@ -292,7 +292,7 @@ int of_dp_aux_populate_bus(struct drm_dp_aux *aux, aux_ep->dev.parent = aux->dev; aux_ep->dev.bus = &dp_aux_bus_type; aux_ep->dev.type = &dp_aux_device_type_type; - aux_ep->dev.of_node = of_node_get(np); + device_set_node(&aux_ep->dev, of_fwnode_handle(of_node_get(np))); dev_set_name(&aux_ep->dev, "aux-%s", dev_name(aux->dev)); ret = device_register(&aux_ep->dev); diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c index 14a2a8473682..c491e3203bf1 100644 --- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c @@ -160,11 +160,11 @@ EXPORT_SYMBOL(drm_dp_dual_mode_write); static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN]) { - static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = + static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN + 1] = "DP-HDMI ADAPTOR\x04"; return memcmp(hdmi_id, dp_dual_mode_hdmi_id, - sizeof(dp_dual_mode_hdmi_id)) == 0; + DP_DUAL_MODE_HDMI_ID_LEN) == 0; } static bool is_type1_adaptor(uint8_t adaptor_id) diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index ac90118b9e7a..dc4446d589e7 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -320,6 +320,9 @@ static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr hdr->broadcast = (buf[idx] >> 7) & 0x1; hdr->path_msg = (buf[idx] >> 6) & 0x1; hdr->msg_len = buf[idx] & 0x3f; + if (hdr->msg_len < 1) /* min space for body CRC */ + return false; + idx++; hdr->somt = (buf[idx] >> 7) & 0x1; hdr->eomt = (buf[idx] >> 6) & 0x1; @@ -3697,8 +3700,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms ret = 0; mgr->payload_id_table_cleared = false; - memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv)); - memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv)); + mgr->reset_rx_state = true; } out_unlock: @@ -3856,6 +3858,11 @@ out_fail: } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); +static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg) +{ + memset(msg, 0, sizeof(*msg)); +} + static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, struct drm_dp_mst_branch **mstb) @@ -3934,6 +3941,34 @@ drm_dp_get_one_sb_msg(struct 
drm_dp_mst_topology_mgr *mgr, bool up, return true; } +static int get_msg_request_type(u8 data) +{ + return data & 0x7f; +} + +static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr, + const struct drm_dp_sideband_msg_tx *txmsg, + const struct drm_dp_sideband_msg_rx *rxmsg) +{ + const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr; + const struct drm_dp_mst_branch *mstb = txmsg->dst; + int tx_req_type = get_msg_request_type(txmsg->msg[0]); + int rx_req_type = get_msg_request_type(rxmsg->msg[0]); + char rad_str[64]; + + if (tx_req_type == rx_req_type) + return true; + + drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str)); + drm_dbg_kms(mgr->dev, + "Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n", + mstb, hdr->seqno, mstb->lct, rad_str, + drm_dp_mst_req_type_str(rx_req_type), rx_req_type, + drm_dp_mst_req_type_str(tx_req_type), tx_req_type); + + return false; +} + static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) { struct drm_dp_sideband_msg_tx *txmsg; @@ -3949,9 +3984,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) /* find the message */ mutex_lock(&mgr->qlock); + txmsg = list_first_entry_or_null(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); - mutex_unlock(&mgr->qlock); /* Were we actually expecting a response, and from this mstb? */ if (!txmsg || txmsg->dst != mstb) { @@ -3960,6 +3995,15 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) hdr = &msg->initial_hdr; drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n", mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]); + + mutex_unlock(&mgr->qlock); + + goto out_clear_reply; + } + + if (!verify_rx_request_type(mgr, txmsg, msg)) { + mutex_unlock(&mgr->qlock); + goto out_clear_reply; } @@ -3975,20 +4019,15 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) txmsg->reply.u.nak.nak_data); } - memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx)); - drm_dp_mst_topology_put_mstb(mstb); - - mutex_lock(&mgr->qlock); txmsg->state = DRM_DP_SIDEBAND_TX_RX; list_del(&txmsg->next); + mutex_unlock(&mgr->qlock); wake_up_all(&mgr->tx_waitq); - return 0; - out_clear_reply: - memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx)); + reset_msg_rx_state(msg); out: if (mstb) drm_dp_mst_topology_put_mstb(mstb); @@ -4070,16 +4109,20 @@ static void drm_dp_mst_up_req_work(struct work_struct *work) static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) { struct drm_dp_pending_up_req *up_req; + struct drm_dp_mst_branch *mst_primary; + int ret = 0; if (!drm_dp_get_one_sb_msg(mgr, true, NULL)) - goto out; + goto out_clear_reply; if (!mgr->up_req_recv.have_eomt) return 0; up_req = kzalloc(sizeof(*up_req), GFP_KERNEL); - if (!up_req) - return -ENOMEM; + if (!up_req) { + ret = -ENOMEM; + goto out_clear_reply; + } INIT_LIST_HEAD(&up_req->next); @@ -4090,10 +4133,19 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n", up_req->msg.req_type); kfree(up_req); - goto out; + goto out_clear_reply; } - drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type, + mutex_lock(&mgr->lock); + mst_primary = mgr->mst_primary; + if (!mst_primary || !drm_dp_mst_topology_try_get_mstb(mst_primary)) { + mutex_unlock(&mgr->lock); + kfree(up_req); + goto out_clear_reply; + } + mutex_unlock(&mgr->lock); + + 
drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type, false); if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { @@ -4110,13 +4162,13 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) conn_stat->peer_device_type); mutex_lock(&mgr->probe_lock); - handle_csn = mgr->mst_primary->link_address_sent; + handle_csn = mst_primary->link_address_sent; mutex_unlock(&mgr->probe_lock); if (!handle_csn) { drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it."); kfree(up_req); - goto out; + goto out_put_primary; } } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { const struct drm_dp_resource_status_notify *res_stat = @@ -4133,9 +4185,22 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) mutex_unlock(&mgr->up_req_lock); queue_work(system_long_wq, &mgr->up_req_work); -out: - memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); - return 0; +out_put_primary: + drm_dp_mst_topology_put_mstb(mst_primary); +out_clear_reply: + reset_msg_rx_state(&mgr->up_req_recv); + return ret; +} + +static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_lock(&mgr->lock); + if (mgr->reset_rx_state) { + mgr->reset_rx_state = false; + reset_msg_rx_state(&mgr->down_rep_recv); + reset_msg_rx_state(&mgr->up_req_recv); + } + mutex_unlock(&mgr->lock); } /** @@ -4172,6 +4237,8 @@ int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u *handled = true; } + update_msg_rx_state(mgr); + if (esi[1] & DP_DOWN_REP_MSG_RDY) { ret = drm_dp_mst_handle_down_rep(mgr); *handled = true; diff --git a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c deleted file mode 100644 index 5729f3bb4398..000000000000 --- a/drivers/gpu/drm/drm_aperture.c +++ /dev/null @@ -1,192 +0,0 @@ -// SPDX-License-Identifier: MIT - -#include <linux/aperture.h> -#include <linux/platform_device.h> - -#include <drm/drm_aperture.h> -#include <drm/drm_drv.h> -#include <drm/drm_print.h> - -/** - * DOC: overview - * - * A graphics device might be supported by different drivers, but only one - * driver can be active at any given time. Many systems load a generic - * graphics drivers, such as EFI-GOP or VESA, early during the boot process. - * During later boot stages, they replace the generic driver with a dedicated, - * hardware-specific driver. To take over the device the dedicated driver - * first has to remove the generic driver. DRM aperture functions manage - * ownership of DRM framebuffer memory and hand-over between drivers. - * - * DRM drivers should call drm_aperture_remove_conflicting_framebuffers() - * at the top of their probe function. The function removes any generic - * driver that is currently associated with the given framebuffer memory. - * If the framebuffer is located at PCI BAR 0, the rsp code looks as in the - * example given below. - * - * .. code-block:: c - * - * static const struct drm_driver example_driver = { - * ... - * }; - * - * static int remove_conflicting_framebuffers(struct pci_dev *pdev) - * { - * resource_size_t base, size; - * int ret; - * - * base = pci_resource_start(pdev, 0); - * size = pci_resource_len(pdev, 0); - * - * return drm_aperture_remove_conflicting_framebuffers(base, size, - * &example_driver); - * } - * - * static int probe(struct pci_dev *pdev) - * { - * int ret; - * - * // Remove any generic drivers... - * ret = remove_conflicting_framebuffers(pdev); - * if (ret) - * return ret; - * - * // ... and initialize the hardware. - * ... 
- * - * drm_dev_register(); - * - * return 0; - * } - * - * PCI device drivers should call - * drm_aperture_remove_conflicting_pci_framebuffers() and let it detect the - * framebuffer apertures automatically. Device drivers without knowledge of - * the framebuffer's location shall call drm_aperture_remove_framebuffers(), - * which removes all drivers for known framebuffer. - * - * Drivers that are susceptible to being removed by other drivers, such as - * generic EFI or VESA drivers, have to register themselves as owners of their - * given framebuffer memory. Ownership of the framebuffer memory is achieved - * by calling devm_aperture_acquire_from_firmware(). On success, the driver - * is the owner of the framebuffer range. The function fails if the - * framebuffer is already owned by another driver. See below for an example. - * - * .. code-block:: c - * - * static int acquire_framebuffers(struct drm_device *dev, struct platform_device *pdev) - * { - * resource_size_t base, size; - * - * mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - * if (!mem) - * return -EINVAL; - * base = mem->start; - * size = resource_size(mem); - * - * return devm_acquire_aperture_from_firmware(dev, base, size); - * } - * - * static int probe(struct platform_device *pdev) - * { - * struct drm_device *dev; - * int ret; - * - * // ... Initialize the device... - * dev = devm_drm_dev_alloc(); - * ... - * - * // ... and acquire ownership of the framebuffer. - * ret = acquire_framebuffers(dev, pdev); - * if (ret) - * return ret; - * - * drm_dev_register(dev, 0); - * - * return 0; - * } - * - * The generic driver is now subject to forced removal by other drivers. This - * only works for platform drivers that support hot unplug. - * When a driver calls drm_aperture_remove_conflicting_framebuffers() et al. - * for the registered framebuffer range, the aperture helpers call - * platform_device_unregister() and the generic driver unloads itself. It - * may not access the device's registers, framebuffer memory, ROM, etc - * afterwards. - */ - -/** - * devm_aperture_acquire_from_firmware - Acquires ownership of a firmware framebuffer - * on behalf of a DRM driver. - * @dev: the DRM device to own the framebuffer memory - * @base: the framebuffer's byte offset in physical memory - * @size: the framebuffer size in bytes - * - * Installs the given device as the new owner of the framebuffer. The function - * expects the framebuffer to be provided by a platform device that has been - * set up by firmware. Firmware can be any generic interface, such as EFI, - * VESA, VGA, etc. If the native hardware driver takes over ownership of the - * framebuffer range, the firmware state gets lost. Aperture helpers will then - * unregister the platform device automatically. Acquired apertures are - * released automatically if the underlying device goes away. - * - * The function fails if the framebuffer range, or parts of it, is currently - * owned by another driver. To evict current owners, callers should use - * drm_aperture_remove_conflicting_framebuffers() et al. before calling this - * function. The function also fails if the given device is not a platform - * device. - * - * Returns: - * 0 on success, or a negative errno value otherwise. 
- */ -int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base, - resource_size_t size) -{ - struct platform_device *pdev; - - if (drm_WARN_ON(dev, !dev_is_platform(dev->dev))) - return -EINVAL; - - pdev = to_platform_device(dev->dev); - - return devm_aperture_acquire_for_platform_device(pdev, base, size); -} -EXPORT_SYMBOL(devm_aperture_acquire_from_firmware); - -/** - * drm_aperture_remove_conflicting_framebuffers - remove existing framebuffers in the given range - * @base: the aperture's base address in physical memory - * @size: aperture size in bytes - * @req_driver: requesting DRM driver - * - * This function removes graphics device drivers which use the memory range described by - * @base and @size. - * - * Returns: - * 0 on success, or a negative errno code otherwise - */ -int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size, - const struct drm_driver *req_driver) -{ - return aperture_remove_conflicting_devices(base, size, req_driver->name); -} -EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers); - -/** - * drm_aperture_remove_conflicting_pci_framebuffers - remove existing framebuffers for PCI devices - * @pdev: PCI device - * @req_driver: requesting DRM driver - * - * This function removes graphics device drivers using the memory range configured - * for any of @pdev's memory bars. The function assumes that a PCI device with - * shadowed ROM drives a primary display and so kicks out vga16fb. - * - * Returns: - * 0 on success, or a negative errno code otherwise - */ -int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, - const struct drm_driver *req_driver) -{ - return aperture_remove_conflicting_pci_devices(pdev, req_driver->name); -} -EXPORT_SYMBOL(drm_aperture_remove_conflicting_pci_framebuffers); diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 0fc99da93afe..9ea2611770f4 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1132,6 +1132,8 @@ static void drm_atomic_connector_print_state(struct drm_printer *p, drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name); drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware); + drm_printf(p, "\tinterlace_allowed=%d\n", connector->interlace_allowed); + drm_printf(p, "\tycbcr_420_allowed=%d\n", connector->ycbcr_420_allowed); drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc); drm_printf(p, "\tcolorspace=%s\n", drm_get_colorspace_name(state->colorspace)); diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index bfedcbf516db..549b28a5918c 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -10,7 +10,6 @@ #include <linux/slab.h> #include <drm/drm_client.h> -#include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> @@ -172,99 +171,6 @@ void drm_client_release(struct drm_client_dev *client) } EXPORT_SYMBOL(drm_client_release); -/** - * drm_client_dev_unregister - Unregister clients - * @dev: DRM device - * - * This function releases all clients by calling each client's - * &drm_client_funcs.unregister callback. The callback function - * is responsibe for releaseing all resources including the client - * itself. - * - * The helper drm_dev_unregister() calls this function. Drivers - * that use it don't need to call this function themselves. 
- */ -void drm_client_dev_unregister(struct drm_device *dev) -{ - struct drm_client_dev *client, *tmp; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return; - - mutex_lock(&dev->clientlist_mutex); - list_for_each_entry_safe(client, tmp, &dev->clientlist, list) { - list_del(&client->list); - if (client->funcs && client->funcs->unregister) { - client->funcs->unregister(client); - } else { - drm_client_release(client); - kfree(client); - } - } - mutex_unlock(&dev->clientlist_mutex); -} -EXPORT_SYMBOL(drm_client_dev_unregister); - -/** - * drm_client_dev_hotplug - Send hotplug event to clients - * @dev: DRM device - * - * This function calls the &drm_client_funcs.hotplug callback on the attached clients. - * - * drm_kms_helper_hotplug_event() calls this function, so drivers that use it - * don't need to call this function themselves. - */ -void drm_client_dev_hotplug(struct drm_device *dev) -{ - struct drm_client_dev *client; - int ret; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return; - - if (!dev->mode_config.num_connector) { - drm_dbg_kms(dev, "No connectors found, will not send hotplug events!\n"); - return; - } - - mutex_lock(&dev->clientlist_mutex); - list_for_each_entry(client, &dev->clientlist, list) { - if (!client->funcs || !client->funcs->hotplug) - continue; - - if (client->hotplug_failed) - continue; - - ret = client->funcs->hotplug(client); - drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); - if (ret) - client->hotplug_failed = true; - } - mutex_unlock(&dev->clientlist_mutex); -} -EXPORT_SYMBOL(drm_client_dev_hotplug); - -void drm_client_dev_restore(struct drm_device *dev) -{ - struct drm_client_dev *client; - int ret; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return; - - mutex_lock(&dev->clientlist_mutex); - list_for_each_entry(client, &dev->clientlist, list) { - if (!client->funcs || !client->funcs->restore) - continue; - - ret = client->funcs->restore(client); - drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); - if (!ret) /* The first one to return zero gets the privilege to restore */ - break; - } - mutex_unlock(&dev->clientlist_mutex); -} - static void drm_client_buffer_delete(struct drm_client_buffer *buffer) { if (buffer->gem) { @@ -584,30 +490,3 @@ int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_re 0, 0, NULL, 0); } EXPORT_SYMBOL(drm_client_framebuffer_flush); - -#ifdef CONFIG_DEBUG_FS -static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data) -{ - struct drm_debugfs_entry *entry = m->private; - struct drm_device *dev = entry->dev; - struct drm_printer p = drm_seq_file_printer(m); - struct drm_client_dev *client; - - mutex_lock(&dev->clientlist_mutex); - list_for_each_entry(client, &dev->clientlist, list) - drm_printf(&p, "%s\n", client->name); - mutex_unlock(&dev->clientlist_mutex); - - return 0; -} - -static const struct drm_debugfs_info drm_client_debugfs_list[] = { - { "internal_clients", drm_client_debugfs_internal_clients, 0 }, -}; - -void drm_client_debugfs_init(struct drm_device *dev) -{ - drm_debugfs_add_files(dev, drm_client_debugfs_list, - ARRAY_SIZE(drm_client_debugfs_list)); -} -#endif diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c new file mode 100644 index 000000000000..e303de564485 --- /dev/null +++ b/drivers/gpu/drm/drm_client_event.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT +/* + * Copyright 2018 Noralf Trønnes + */ + +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/seq_file.h> + 
+#include <drm/drm_client.h> +#include <drm/drm_client_event.h> +#include <drm/drm_debugfs.h> +#include <drm/drm_device.h> +#include <drm/drm_drv.h> +#include <drm/drm_print.h> + +#include "drm_internal.h" + +/** + * drm_client_dev_unregister - Unregister clients + * @dev: DRM device + * + * This function releases all clients by calling each client's + * &drm_client_funcs.unregister callback. The callback function + * is responsible for releasing all resources including the client + * itself. + * + * The helper drm_dev_unregister() calls this function. Drivers + * that use it don't need to call this function themselves. + */ +void drm_client_dev_unregister(struct drm_device *dev) +{ + struct drm_client_dev *client, *tmp; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry_safe(client, tmp, &dev->clientlist, list) { + list_del(&client->list); + if (client->funcs && client->funcs->unregister) { + client->funcs->unregister(client); + } else { + drm_client_release(client); + kfree(client); + } + } + mutex_unlock(&dev->clientlist_mutex); +} +EXPORT_SYMBOL(drm_client_dev_unregister); + +/** + * drm_client_dev_hotplug - Send hotplug event to clients + * @dev: DRM device + * + * This function calls the &drm_client_funcs.hotplug callback on the attached clients. + * + * drm_kms_helper_hotplug_event() calls this function, so drivers that use it + * don't need to call this function themselves. + */ +void drm_client_dev_hotplug(struct drm_device *dev) +{ + struct drm_client_dev *client; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + if (!dev->mode_config.num_connector) { + drm_dbg_kms(dev, "No connectors found, will not send hotplug events!\n"); + return; + } + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) { + if (!client->funcs || !client->funcs->hotplug) + continue; + + if (client->hotplug_failed) + continue; + + ret = client->funcs->hotplug(client); + drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); + if (ret) + client->hotplug_failed = true; + } + mutex_unlock(&dev->clientlist_mutex); +} +EXPORT_SYMBOL(drm_client_dev_hotplug); + +void drm_client_dev_restore(struct drm_device *dev) +{ + struct drm_client_dev *client; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) { + if (!client->funcs || !client->funcs->restore) + continue; + + ret = client->funcs->restore(client); + drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); + if (!ret) /* The first one to return zero gets the privilege to restore */ + break; + } + mutex_unlock(&dev->clientlist_mutex); +} + +static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_lock) +{ + struct drm_device *dev = client->dev; + int ret = 0; + + if (drm_WARN_ON_ONCE(dev, client->suspended)) + return 0; + + if (client->funcs && client->funcs->suspend) + ret = client->funcs->suspend(client, holds_console_lock); + drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); + + client->suspended = true; + + return ret; +} + +void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock) +{ + struct drm_client_dev *client; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) { + if (!client->suspended) + drm_client_suspend(client, holds_console_lock); + } + mutex_unlock(&dev->clientlist_mutex); +}
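The drm_client_dev_suspend() walk added above mirrors the hotplug and restore loops: it iterates the client list under clientlist_mutex and forwards the console-lock state to each client's optional &drm_client_funcs.suspend callback. As a minimal sketch of the client side (illustrative names only; the registration boilerplate via drm_client_init()/drm_client_register() is omitted), a client opts in to the new hooks like this:

#include <drm/drm_client.h>

static int example_client_suspend(struct drm_client_dev *client,
				  bool holds_console_lock)
{
	/*
	 * Stop touching the scanout buffer until resume. Fbdev-style
	 * clients use holds_console_lock to choose between the locked
	 * and unlocked drm_fb_helper_set_suspend() variants.
	 */
	return 0;
}

static int example_client_resume(struct drm_client_dev *client,
				 bool holds_console_lock)
{
	/* Re-establish the client's display state. */
	return 0;
}

static const struct drm_client_funcs example_client_funcs = {
	.owner   = THIS_MODULE,
	.suspend = example_client_suspend,
	.resume  = example_client_resume,
};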
+EXPORT_SYMBOL(drm_client_dev_suspend); + +static int drm_client_resume(struct drm_client_dev *client, bool holds_console_lock) +{ + struct drm_device *dev = client->dev; + int ret = 0; + + if (drm_WARN_ON_ONCE(dev, !client->suspended)) + return 0; + + if (client->funcs && client->funcs->resume) + ret = client->funcs->resume(client, holds_console_lock); + drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); + + client->suspended = false; + + return ret; +} + +void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock) +{ + struct drm_client_dev *client; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) { + if (client->suspended) + drm_client_resume(client, holds_console_lock); + } + mutex_unlock(&dev->clientlist_mutex); +} +EXPORT_SYMBOL(drm_client_dev_resume); + +#ifdef CONFIG_DEBUG_FS +static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data) +{ + struct drm_debugfs_entry *entry = m->private; + struct drm_device *dev = entry->dev; + struct drm_printer p = drm_seq_file_printer(m); + struct drm_client_dev *client; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) + drm_printf(&p, "%s\n", client->name); + mutex_unlock(&dev->clientlist_mutex); + + return 0; +} + +static const struct drm_debugfs_info drm_client_debugfs_list[] = { + { "internal_clients", drm_client_debugfs_internal_clients, 0 }, +}; + +void drm_client_debugfs_init(struct drm_device *dev) +{ + drm_debugfs_add_files(dev, drm_client_debugfs_list, + ARRAY_SIZE(drm_client_debugfs_list)); +} +#endif diff --git a/drivers/gpu/drm/drm_client_setup.c b/drivers/gpu/drm/drm_client_setup.c index 5969c4ffe31b..c14221ca5a0d 100644 --- a/drivers/gpu/drm/drm_client_setup.c +++ b/drivers/gpu/drm/drm_client_setup.c @@ -64,3 +64,6 @@ void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color drm_client_setup_with_fourcc(dev, fourcc); } EXPORT_SYMBOL(drm_client_setup_with_color_mode); + +MODULE_DESCRIPTION("In-kernel DRM clients"); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 52783eac078e..536409a35df4 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -32,7 +32,6 @@ #include <drm/drm_atomic.h> #include <drm/drm_auth.h> #include <drm/drm_bridge.h> -#include <drm/drm_client.h> #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index ac30b0ec9d93..c2c172eb25df 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -38,7 +38,7 @@ #include <drm/drm_accel.h> #include <drm/drm_cache.h> -#include <drm/drm_client.h> +#include <drm/drm_client_event.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index d5e8994345bb..c9008113111b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -697,6 +697,7 @@ void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u3 } EXPORT_SYMBOL(drm_fb_helper_damage_area); +#ifdef CONFIG_FB_DEFERRED_IO /** * drm_fb_helper_deferred_io() - fbdev deferred_io callback function * @info: fb_info struct pointer @@ -740,6 +741,7 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagerefli } } EXPORT_SYMBOL(drm_fb_helper_deferred_io); +#endif /** * drm_fb_helper_set_suspend - 
wrapper around fb_set_suspend diff --git a/drivers/gpu/drm/drm_fbdev_client.c b/drivers/gpu/drm/drm_fbdev_client.c index a09382afe2fb..246fb63ab250 100644 --- a/drivers/gpu/drm/drm_fbdev_client.c +++ b/drivers/gpu/drm/drm_fbdev_client.c @@ -61,11 +61,37 @@ err_drm_err: return ret; } +static int drm_fbdev_client_suspend(struct drm_client_dev *client, bool holds_console_lock) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + if (holds_console_lock) + drm_fb_helper_set_suspend(fb_helper, true); + else + drm_fb_helper_set_suspend_unlocked(fb_helper, true); + + return 0; +} + +static int drm_fbdev_client_resume(struct drm_client_dev *client, bool holds_console_lock) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + if (holds_console_lock) + drm_fb_helper_set_suspend(fb_helper, false); + else + drm_fb_helper_set_suspend_unlocked(fb_helper, false); + + return 0; +} + static const struct drm_client_funcs drm_fbdev_client_funcs = { .owner = THIS_MODULE, .unregister = drm_fbdev_client_unregister, .restore = drm_fbdev_client_restore, .hotplug = drm_fbdev_client_hotplug, + .suspend = drm_fbdev_client_suspend, + .resume = drm_fbdev_client_resume, }; /** @@ -76,8 +102,8 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = { * * This function sets up fbdev emulation. Restore, hotplug events and * teardown are all taken care of. Drivers that do suspend/resume need - * to call drm_fb_helper_set_suspend_unlocked() themselves. Simple - * drivers might use drm_mode_config_helper_suspend(). + * to call drm_client_dev_suspend() and drm_client_dev_resume() by + * themselves. Simple drivers might use drm_mode_config_helper_suspend(). * * This function is safe to call even when there are no connectors present. * Setup will be retried on the next hotplug event. 
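Per the updated kerneldoc above, drivers that manage suspend/resume themselves now go through the client layer instead of poking the fbdev helper directly. A minimal sketch, assuming a driver that stores its drm_device in drvdata (the example_* names are placeholders, not part of the patch):

#include <drm/drm_drv.h>
#include <drm/drm_modeset_helper.h>

static int example_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	/*
	 * For simple atomic drivers this helper now suspends all DRM
	 * clients through drm_client_dev_suspend() before saving the
	 * atomic state; see the drm_modeset_helper.c hunks below.
	 */
	return drm_mode_config_helper_suspend(drm);
}

static int example_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops,
				example_pm_suspend, example_pm_resume);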
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index 2343af1b09bc..b14b581c059d 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -48,7 +48,8 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info) if (!fb_helper->dev) return; - fb_deferred_io_cleanup(info); + if (info->fbdefio) + fb_deferred_io_cleanup(info); drm_fb_helper_fini(fb_helper); drm_client_buffer_vunmap(fb_helper->buffer); diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 2ee1c3233b0c..cb5f22f5bbb6 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -40,7 +40,7 @@ #include <linux/slab.h> #include <linux/vga_switcheroo.h> -#include <drm/drm_client.h> +#include <drm/drm_client_event.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_gem.h> @@ -129,7 +129,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev) */ struct drm_file *drm_file_alloc(struct drm_minor *minor) { - static atomic64_t ident = ATOMIC_INIT(0); + static atomic64_t ident = ATOMIC64_INIT(0); struct drm_device *dev = minor->dev; struct drm_file *file; int ret; diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 47e6e8577b62..b781601946db 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -870,7 +870,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, INIT_LIST_HEAD(&fb->filp_head); fb->funcs = funcs; - strcpy(fb->comm, current->comm); + strscpy(fb->comm, current->comm); ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB, false, drm_framebuffer_free); diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c index 870b90b78bc4..16988d316a6d 100644 --- a/drivers/gpu/drm/drm_gem_dma_helper.c +++ b/drivers/gpu/drm/drm_gem_dma_helper.c @@ -600,5 +600,5 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev, EXPORT_SYMBOL(drm_gem_dma_prime_import_sg_table_vmap); MODULE_DESCRIPTION("DRM DMA memory-management helpers"); -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 3bdb6ba37ff4..185534f56bab 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -18,7 +18,7 @@ #include "drm_internal.h" -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); #define AFBC_HEADER_SIZE 16 #define AFBC_TH_LAYOUT_ALIGNMENT 8 diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 8508060a1a95..5ab351409312 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -22,7 +22,7 @@ #include <drm/drm_prime.h> #include <drm/drm_print.h> -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); /** * DOC: overview @@ -800,5 +800,5 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table); MODULE_DESCRIPTION("DRM SHMEM memory-management helpers"); -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 1705bfc90b1e..b2b6a8e49dda 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -48,6 +48,14 @@ struct drm_prime_file_private; struct drm_printer; struct drm_vblank_crtc; +/* drm_client_event.c */ +#if 
defined(CONFIG_DRM_CLIENT) +void drm_client_debugfs_init(struct drm_device *dev); +#else +static inline void drm_client_debugfs_init(struct drm_device *dev) +{ } +#endif + /* drm_file.c */ extern struct mutex drm_global_mutex; bool drm_dev_needs_global_mutex(struct drm_device *dev); diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index d8ee74701f1e..5e5c5f84daac 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -1522,7 +1522,7 @@ EXPORT_SYMBOL(mipi_dsi_compression_mode_ext_multi); /** * mipi_dsi_compression_mode_multi() - enable/disable DSC on the peripheral - * @dsi: DSI peripheral device + * @ctx: Context for multiple DSI transactions * @enable: Whether to enable or disable the DSC * * Enable or disable Display Stream Compression on the peripheral using the diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c index 2c582020cb42..5565464c1734 100644 --- a/drivers/gpu/drm/drm_modeset_helper.c +++ b/drivers/gpu/drm/drm_modeset_helper.c @@ -21,7 +21,7 @@ */ #include <drm/drm_atomic_helper.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_client_event.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_modeset_helper.h> @@ -185,7 +185,7 @@ EXPORT_SYMBOL(drm_crtc_init); * Zero on success, negative error code on error. * * See also: - * drm_kms_helper_poll_disable() and drm_fb_helper_set_suspend_unlocked(). + * drm_kms_helper_poll_disable() and drm_client_dev_suspend(). */ int drm_mode_config_helper_suspend(struct drm_device *dev) { @@ -199,10 +199,11 @@ int drm_mode_config_helper_suspend(struct drm_device *dev) if (dev->mode_config.poll_enabled) drm_kms_helper_poll_disable(dev); - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); + drm_client_dev_suspend(dev, false); state = drm_atomic_helper_suspend(dev); if (IS_ERR(state)) { - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + drm_client_dev_resume(dev, false); + /* * Don't enable polling if it was never initialized */ @@ -230,7 +231,7 @@ EXPORT_SYMBOL(drm_mode_config_helper_suspend); * Zero on success, negative error code on error. * * See also: - * drm_fb_helper_set_suspend_unlocked() and drm_kms_helper_poll_enable(). + * drm_client_dev_resume() and drm_kms_helper_poll_enable(). */ int drm_mode_config_helper_resume(struct drm_device *dev) { @@ -247,7 +248,8 @@ int drm_mode_config_helper_resume(struct drm_device *dev) DRM_ERROR("Failed to resume (%d)\n", ret); dev->mode_config.suspend_state = NULL; - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + drm_client_dev_resume(dev, false); + /* * Don't enable polling if it is not initialized */ diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index 177b600895d3..5c2abc9eca9c 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -341,8 +341,23 @@ static int drm_of_lvds_get_remote_pixels_type( return pixels_type; } +static int __drm_of_lvds_get_dual_link_pixel_order(int p1_pt, int p2_pt) +{ + /* + * A valid dual-LVDS bus is found when one port is marked with + * "dual-lvds-even-pixels", and the other port is marked with + * "dual-lvds-odd-pixels"; bail out if the markers are not right. + */ + if (p1_pt + p2_pt != DRM_OF_LVDS_EVEN + DRM_OF_LVDS_ODD) + return -EINVAL; + + return p1_pt == DRM_OF_LVDS_EVEN ? 
+ DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS : + DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS; +} + /** - * drm_of_lvds_get_dual_link_pixel_order - Get LVDS dual-link pixel order + * drm_of_lvds_get_dual_link_pixel_order - Get LVDS dual-link source pixel order * @port1: First DT port node of the Dual-link LVDS source * @port2: Second DT port node of the Dual-link LVDS source * @@ -387,19 +402,58 @@ int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, if (remote_p2_pt < 0) return remote_p2_pt; - /* - * A valid dual-lVDS bus is found when one remote port is marked with - * "dual-lvds-even-pixels", and the other remote port is marked with - * "dual-lvds-odd-pixels", bail out if the markers are not right. - */ - if (remote_p1_pt + remote_p2_pt != DRM_OF_LVDS_EVEN + DRM_OF_LVDS_ODD) + return __drm_of_lvds_get_dual_link_pixel_order(remote_p1_pt, remote_p2_pt); +} +EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order); + +/** + * drm_of_lvds_get_dual_link_pixel_order_sink - Get LVDS dual-link sink pixel order + * @port1: First DT port node of the Dual-link LVDS sink + * @port2: Second DT port node of the Dual-link LVDS sink + * + * An LVDS dual-link connection is made of two links, with even pixels + * transiting on one link, and odd pixels on the other link. This function + * returns, for two ports of an LVDS dual-link sink, which port shall receive + * the even and odd pixels, based on the requirements of the sink. + * + * The pixel order is determined from the dual-lvds-even-pixels and + * dual-lvds-odd-pixels properties in the sink's DT port nodes. If those + * properties are not present, or if their usage is not valid, this function + * returns -EINVAL. + * + * If either port is not connected, this function returns -EPIPE. + * + * @port1 and @port2 are typically DT sibling nodes, but may have different + * parents when, for instance, two separate LVDS decoders receive the even and + * odd pixels. + * + * Return: + * * DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS - @port1 receives even pixels and @port2 + * receives odd pixels + * * DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS - @port1 receives odd pixels and @port2 + * receives even pixels + * * -EINVAL - @port1 or @port2 are NULL + * * -EPIPE - when @port1 or @port2 are not connected + */ +int drm_of_lvds_get_dual_link_pixel_order_sink(struct device_node *port1, + struct device_node *port2) +{ + int sink_p1_pt, sink_p2_pt; + + if (!port1 || !port2) return -EINVAL; - return remote_p1_pt == DRM_OF_LVDS_EVEN ? 
- DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS : - DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS; + sink_p1_pt = drm_of_lvds_get_port_pixels_type(port1); + if (!sink_p1_pt) + return -EPIPE; + + sink_p2_pt = drm_of_lvds_get_port_pixels_type(port2); + if (!sink_p2_pt) + return -EPIPE; + + return __drm_of_lvds_get_dual_link_pixel_order(sink_p1_pt, sink_p2_pt); } -EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order); +EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order_sink); /** * drm_of_lvds_get_data_mapping - Get LVDS data mapping @@ -410,7 +464,9 @@ EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order); * Return: * * MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - data-mapping is "jeida-18" * * MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA - data-mapping is "jeida-24" + * * MEDIA_BUS_FMT_RGB101010_1X7X5_JEIDA - data-mapping is "jeida-30" * * MEDIA_BUS_FMT_RGB888_1X7X4_SPWG - data-mapping is "vesa-24" + * * MEDIA_BUS_FMT_RGB101010_1X7X5_SPWG - data-mapping is "vesa-30" * * -EINVAL - the "data-mapping" property is unsupported * * -ENODEV - the "data-mapping" property is missing */ @@ -427,8 +483,12 @@ int drm_of_lvds_get_data_mapping(const struct device_node *port) return MEDIA_BUS_FMT_RGB666_1X7X3_SPWG; if (!strcmp(mapping, "jeida-24")) return MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA; + if (!strcmp(mapping, "jeida-30")) + return MEDIA_BUS_FMT_RGB101010_1X7X5_JEIDA; if (!strcmp(mapping, "vesa-24")) return MEDIA_BUS_FMT_RGB888_1X7X4_SPWG; + if (!strcmp(mapping, "vesa-30")) + return MEDIA_BUS_FMT_RGB101010_1X7X5_SPWG; return -EINVAL; } diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 3f2e2b851cbc..4a73821b81f6 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -421,7 +421,6 @@ static const struct dmi_system_id orientation_data[] = { }, { /* Lenovo Yoga Tab 3 X90F */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), }, .driver_data = (void *)&lcd1600x2560_rightside_up, diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c index 74412b7bf936..0a9ecc1380d2 100644 --- a/drivers/gpu/drm/drm_panic.c +++ b/drivers/gpu/drm/drm_panic.c @@ -209,6 +209,14 @@ static u32 convert_xrgb8888_to_argb2101010(u32 pix) return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03); } +static u32 convert_xrgb8888_to_abgr2101010(u32 pix) +{ + pix = ((pix & 0x00FF0000) >> 14) | + ((pix & 0x0000FF00) << 4) | + ((pix & 0x000000FF) << 22); + return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03); +} + /* * convert_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format * @color: input color, in xrgb8888 format @@ -242,6 +250,8 @@ static u32 convert_from_xrgb8888(u32 color, u32 format) return convert_xrgb8888_to_xrgb2101010(color); case DRM_FORMAT_ARGB2101010: return convert_xrgb8888_to_argb2101010(color); + case DRM_FORMAT_ABGR2101010: + return convert_xrgb8888_to_abgr2101010(color); default: WARN_ONCE(1, "Can't convert to %p4cc\n", &format); return 0; diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs index 1ef56cb07dfb..09500cddc009 100644 --- a/drivers/gpu/drm/drm_panic_qr.rs +++ b/drivers/gpu/drm/drm_panic_qr.rs @@ -209,12 +209,9 @@ const FORMAT_INFOS_QR_L: [u16; 8] = [ impl Version { /// Returns the smallest QR version than can hold these segments. 
fn from_segments(segments: &[&Segment<'_>]) -> Option<Version> { - for v in (1..=40).map(|k| Version(k)) { - if v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum() { - return Some(v); - } - } - None + (1..=40) + .map(Version) + .find(|&v| v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum()) } fn width(&self) -> u8 { @@ -242,7 +239,7 @@ impl Version { } fn alignment_pattern(&self) -> &'static [u8] { - &ALIGNMENT_PATTERNS[self.0 - 1] + ALIGNMENT_PATTERNS[self.0 - 1] } fn poly(&self) -> &'static [u8] { @@ -479,7 +476,7 @@ struct EncodedMsg<'a> { /// Data to be put in the QR code, with correct segment encoding, padding, and /// Error Code Correction. impl EncodedMsg<'_> { - fn new<'a, 'b>(segments: &[&Segment<'b>], data: &'a mut [u8]) -> Option<EncodedMsg<'a>> { + fn new<'a>(segments: &[&Segment<'_>], data: &'a mut [u8]) -> Option<EncodedMsg<'a>> { let version = Version::from_segments(segments)?; let ec_size = version.ec_size(); let g1_blocks = version.g1_blocks(); @@ -492,7 +489,7 @@ impl EncodedMsg<'_> { data.fill(0); let mut em = EncodedMsg { - data: data, + data, ec_size, g1_blocks, g2_blocks, @@ -722,7 +719,10 @@ impl QrImage<'_> { fn is_finder(&self, x: u8, y: u8) -> bool { let end = self.width - 8; - (x < 8 && y < 8) || (x < 8 && y >= end) || (x >= end && y < 8) + #[expect(clippy::nonminimal_bool)] + { + (x < 8 && y < 8) || (x < 8 && y >= end) || (x >= end && y < 8) + } } // Alignment pattern: 5x5 squares in a grid. @@ -979,10 +979,11 @@ pub unsafe extern "C" fn drm_panic_qr_generate( /// * `url_len`: Length of the URL. /// /// * If `url_len` > 0, remove the 2 segments header/length and also count the -/// conversion to numeric segments. +/// conversion to numeric segments. /// * If `url_len` = 0, only removes 3 bytes for 1 binary segment. 
#[no_mangle] pub extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize { + #[expect(clippy::manual_range_contains)] if version < 1 || version > 40 { return 0; } diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 0e3f8adf162f..32a8781cfd67 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -40,7 +40,7 @@ #include "drm_internal.h" -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); /** * DOC: overview and lifetime rules diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 92f21764246f..96b266b37ba4 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -33,7 +33,7 @@ #include <linux/moduleparam.h> #include <drm/drm_bridge.h> -#include <drm/drm_client.h> +#include <drm/drm_client_event.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 8e3d2d7060f8..4f2ab8a7b50f 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -712,16 +712,14 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private, int fd, u32 *handle) { struct drm_syncobj *syncobj; - struct fd f = fdget(fd); + CLASS(fd, f)(fd); int ret; - if (!fd_file(f)) + if (fd_empty(f)) return -EINVAL; - if (fd_file(f)->f_op != &drm_syncobj_file_fops) { - fdput(f); + if (fd_file(f)->f_op != &drm_syncobj_file_fops) return -EINVAL; - } /* take a reference to put in the idr */ syncobj = fd_file(f)->private_data; @@ -739,7 +737,6 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private, } else drm_syncobj_put(syncobj); - fdput(f); return ret; } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index 384df1659be6..b13a17276d07 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -482,7 +482,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, } else { CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, VIVS_GL_FLUSH_CACHE_DEPTH | - VIVS_GL_FLUSH_CACHE_COLOR); + VIVS_GL_FLUSH_CACHE_COLOR | + VIVS_GL_FLUSH_CACHE_SHADER_L1); if (has_blt) { CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c index 721d633aece9..7aa5f14d0c87 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c @@ -5,8 +5,6 @@ #include <linux/dma-mapping.h> -#include <drm/drm_mm.h> - #include "etnaviv_cmdbuf.h" #include "etnaviv_gem.h" #include "etnaviv_gpu.h" @@ -55,6 +53,7 @@ etnaviv_cmdbuf_suballoc_new(struct device *dev) return suballoc; free_suballoc: + mutex_destroy(&suballoc->lock); kfree(suballoc); return ERR_PTR(ret); @@ -79,6 +78,7 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc) { dma_free_wc(suballoc->dev, SUBALLOC_SIZE, suballoc->vaddr, suballoc->paddr); + mutex_destroy(&suballoc->lock); kfree(suballoc); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 6500f3999c5f..a46f9e4ac09a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -538,6 +538,16 @@ static int etnaviv_bind(struct device *dev) priv->num_gpus = 0; priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; + /* + * If the GPU is part of a system with DMA addressing 
limitations, + * request pages for our SHM backend buffers from the DMA32 zone to + * hopefully avoid performance killing SWIOTLB bounce buffering. + */ + if (dma_addressing_limited(dev)) { + priv->shm_gfp_mask |= GFP_DMA32; + priv->shm_gfp_mask &= ~__GFP_HIGHMEM; + } + priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev); if (IS_ERR(priv->cmdbuf_suballoc)) { dev_err(drm->dev, "Failed to create cmdbuf suballocator\n"); @@ -564,6 +574,7 @@ out_unbind: out_destroy_suballoc: etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc); out_free_priv: + mutex_destroy(&priv->gem_lock); kfree(priv); out_put: drm_dev_put(drm); @@ -608,7 +619,7 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) if (!of_device_is_available(core_node)) continue; - drm_of_component_match_add(&pdev->dev, &match, + drm_of_component_match_add(dev, &match, component_compare_of, core_node); } } else { @@ -631,9 +642,9 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) * bit to make sure we are allocating the command buffers and * TLBs in the lower 4 GiB address space. */ - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) || - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) { - dev_dbg(&pdev->dev, "No suitable DMA available\n"); + if (dma_set_mask(dev, DMA_BIT_MASK(40)) || + dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) { + dev_dbg(dev, "No suitable DMA available\n"); return -ENODEV; } @@ -644,7 +655,7 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) */ first_node = etnaviv_of_first_available_node(); if (first_node) { - of_dma_configure(&pdev->dev, first_node, true); + of_dma_configure(dev, first_node, true); of_node_put(first_node); } @@ -658,7 +669,7 @@ static void etnaviv_pdev_remove(struct platform_device *pdev) static struct platform_driver etnaviv_platform_driver = { .probe = etnaviv_pdev_probe, - .remove_new = etnaviv_pdev_remove, + .remove = etnaviv_pdev_remove, .driver = { .name = "etnaviv", }, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 5c0c9d4e3be1..16473c371444 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -514,6 +514,7 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj) etnaviv_obj->ops->release(etnaviv_obj); drm_gem_object_release(obj); + mutex_destroy(&etnaviv_obj->lock); kfree(etnaviv_obj); } @@ -543,7 +544,7 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = { .vm_ops = &vm_ops, }; -static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags, +static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags, const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj) { struct etnaviv_gem_object *etnaviv_obj; @@ -570,6 +571,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags, if (!etnaviv_obj) return -ENOMEM; + etnaviv_obj->size = ALIGN(size, SZ_4K); etnaviv_obj->flags = flags; etnaviv_obj->ops = ops; @@ -590,15 +592,13 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file, struct drm_gem_object *obj = NULL; int ret; - size = PAGE_ALIGN(size); - - ret = etnaviv_gem_new_impl(dev, flags, &etnaviv_gem_shmem_ops, &obj); + ret = etnaviv_gem_new_impl(dev, size, flags, &etnaviv_gem_shmem_ops, &obj); if (ret) goto fail; lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class); - ret = drm_gem_object_init(dev, obj, size); + ret = drm_gem_object_init(dev, obj, PAGE_ALIGN(size)); if (ret) goto fail; @@ -627,7 +627,7 @@ int etnaviv_gem_new_private(struct drm_device *dev, 
size_t size, u32 flags, struct drm_gem_object *obj; int ret; - ret = etnaviv_gem_new_impl(dev, flags, ops, &obj); + ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj); if (ret) return ret; @@ -686,7 +686,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj) kfree(etnaviv_obj->sgt); } if (etnaviv_obj->pages) { - int npages = etnaviv_obj->base.size >> PAGE_SHIFT; + unsigned int npages = etnaviv_obj->base.size >> PAGE_SHIFT; unpin_user_pages(etnaviv_obj->pages, npages); kvfree(etnaviv_obj->pages); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index a42d260cac2c..687555aae807 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -36,6 +36,11 @@ struct etnaviv_gem_object { const struct etnaviv_gem_ops *ops; struct mutex lock; + /* + * The actual size that is visible to the GPU, not necessarily + * PAGE_SIZE aligned, but should be aligned to GPU page size. + */ + u32 size; u32 flags; struct list_head gem_node; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 3524b5811682..42e57d142554 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -10,14 +10,14 @@ #include "etnaviv_drv.h" #include "etnaviv_gem.h" -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); static struct lock_class_key etnaviv_prime_lock_class; struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); - int npages = obj->size >> PAGE_SHIFT; + unsigned int npages = obj->size >> PAGE_SHIFT; if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */ return ERR_PTR(-EINVAL); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 3d0f8d182506..3c0a5c3e0e3d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -6,7 +6,6 @@ #include <drm/drm_file.h> #include <linux/dma-fence-array.h> #include <linux/file.h> -#include <linux/pm_runtime.h> #include <linux/dma-resv.h> #include <linux/sync_file.h> #include <linux/uaccess.h> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 7c7f97793ddd..2d4c112ce033 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -574,8 +574,8 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) continue; } - /* disable debug registers, as they are not normally needed */ - control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS; + /* enable debug register access */ + control &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS; gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); failed = false; @@ -839,17 +839,8 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) if (ret) goto fail; - /* - * If the GPU is part of a system with DMA addressing limitations, - * request pages for our SHM backend buffers from the DMA32 zone to - * hopefully avoid performance killing SWIOTLB bounce buffering. 
- */ - if (dma_addressing_limited(gpu->dev)) - priv->shm_gfp_mask |= GFP_DMA32; - /* Create buffer: */ - ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer, - PAGE_SIZE); + ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer, SZ_4K); if (ret) { dev_err(gpu->dev, "could not create command buffer\n"); goto fail; @@ -1330,17 +1321,16 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu, { u32 val; + mutex_lock(&gpu->lock); + /* disable clock gating */ val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val); - /* enable debug register */ - val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); - val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS; - gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val); - sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE); + + mutex_unlock(&gpu->lock); } static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu, @@ -1350,23 +1340,22 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu, unsigned int i; u32 val; + mutex_lock(&gpu->lock); + sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST); + /* enable clock gating */ + val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); + val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; + gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val); + + mutex_unlock(&gpu->lock); + for (i = 0; i < submit->nr_pmrs; i++) { const struct etnaviv_perfmon_request *pmr = submit->pmrs + i; *pmr->bo_vma = pmr->sequence; } - - /* disable debug register */ - val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); - val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS; - gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val); - - /* enable clock gating */ - val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); - val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; - gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val); } @@ -1862,7 +1851,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) if (!gpu) return -ENOMEM; - gpu->dev = &pdev->dev; + gpu->dev = dev; mutex_init(&gpu->lock); mutex_init(&gpu->sched_lock); @@ -1876,8 +1865,8 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) if (gpu->irq < 0) return gpu->irq; - err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0, - dev_name(gpu->dev), gpu); + err = devm_request_irq(dev, gpu->irq, irq_handler, 0, + dev_name(dev), gpu); if (err) { dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err); return err; @@ -1914,13 +1903,13 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) * autosuspend delay is rather arbitary: no measurements have * yet been performed to determine an appropriate value. 
*/ - pm_runtime_use_autosuspend(gpu->dev); - pm_runtime_set_autosuspend_delay(gpu->dev, 200); - pm_runtime_enable(gpu->dev); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, 200); + pm_runtime_enable(dev); - err = component_add(&pdev->dev, &gpu_ops); + err = component_add(dev, &gpu_ops); if (err < 0) { - dev_err(&pdev->dev, "failed to register component: %d\n", err); + dev_err(dev, "failed to register component: %d\n", err); return err; } @@ -1929,8 +1918,13 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) static void etnaviv_gpu_platform_remove(struct platform_device *pdev) { + struct etnaviv_gpu *gpu = dev_get_drvdata(&pdev->dev); + component_del(&pdev->dev, &gpu_ops); pm_runtime_disable(&pdev->dev); + + mutex_destroy(&gpu->lock); + mutex_destroy(&gpu->sched_lock); } static int etnaviv_gpu_rpm_suspend(struct device *dev) @@ -1991,6 +1985,6 @@ struct platform_driver etnaviv_gpu_driver = { .of_match_table = etnaviv_gpu_match, }, .probe = etnaviv_gpu_platform_probe, - .remove_new = etnaviv_gpu_platform_remove, + .remove = etnaviv_gpu_platform_remove, .id_table = gpu_ids, }; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 31322195b9e4..4d8a7d48ade3 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -144,6 +144,7 @@ struct etnaviv_gpu { /* hang detection */ u32 hangcheck_dma_addr; + u32 hangcheck_primid; u32 hangcheck_fence; void __iomem *mmio; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 1661d589bf3e..7e065b3723cf 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -69,9 +69,11 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context, return ret; } -static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, +static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, + u32 iova, unsigned int va_len, struct sg_table *sgt, int prot) -{ struct scatterlist *sg; +{ + struct scatterlist *sg; unsigned int da = iova; unsigned int i; int ret; @@ -81,14 +83,16 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, for_each_sgtable_dma_sg(sgt, sg, i) { phys_addr_t pa = sg_dma_address(sg) - sg->offset; - size_t bytes = sg_dma_len(sg) + sg->offset; + unsigned int da_len = sg_dma_len(sg) + sg->offset; + unsigned int bytes = min_t(unsigned int, da_len, va_len); - VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes); + VERB("map[%d]: %08x %pap(%x)", i, iova, &pa, bytes); ret = etnaviv_context_map(context, da, pa, bytes, prot); if (ret) goto fail; + va_len -= bytes; da += bytes; } @@ -104,21 +108,7 @@ fail: static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova, struct sg_table *sgt, unsigned len) { - struct scatterlist *sg; - unsigned int da = iova; - int i; - - for_each_sgtable_dma_sg(sgt, sg, i) { - size_t bytes = sg_dma_len(sg) + sg->offset; - - etnaviv_context_unmap(context, da, bytes); - - VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); - - BUG_ON(!PAGE_ALIGNED(bytes)); - - da += bytes; - } + etnaviv_context_unmap(context, iova, len); context->flush_seq++; } @@ -131,7 +121,7 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context, lockdep_assert_held(&context->lock); etnaviv_iommu_unmap(context, mapping->vram_node.start, - etnaviv_obj->sgt, etnaviv_obj->base.size); + etnaviv_obj->sgt, etnaviv_obj->size); drm_mm_remove_node(&mapping->vram_node); } @@ 
-305,16 +295,14 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context, node = &mapping->vram_node; if (va) - ret = etnaviv_iommu_insert_exact(context, node, - etnaviv_obj->base.size, va); + ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va); else - ret = etnaviv_iommu_find_iova(context, node, - etnaviv_obj->base.size); + ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size); if (ret < 0) goto unlock; mapping->iova = node->start; - ret = etnaviv_iommu_map(context, node->start, sgt, + ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt, ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE); if (ret < 0) { @@ -358,7 +346,7 @@ static void etnaviv_iommu_context_free(struct kref *kref) container_of(kref, struct etnaviv_iommu_context, refcount); etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping); - + mutex_destroy(&context->lock); context->global->ops->free(context); } void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h index c01a147f0dfd..7f8ac0178547 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h @@ -61,7 +61,6 @@ struct etnaviv_iommu_global { /* P(age) T(able) A(rray) */ u64 *pta_cpu; dma_addr_t pta_dma; - struct spinlock pta_lock; DECLARE_BITMAP(pta_alloc, ETNAVIV_PTA_ENTRIES); } v2; }; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c index dc9dea664a28..d53a5c293373 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c @@ -62,6 +62,8 @@ static u32 pipe_perf_reg_read(struct etnaviv_gpu *gpu, u32 value = 0; unsigned i; + lockdep_assert_held(&gpu->lock); + for (i = 0; i < gpu->identity.pixel_pipes; i++) { pipe_select(gpu, clock, i); value += perf_reg_read(gpu, domain, signal); @@ -81,6 +83,8 @@ static u32 pipe_reg_read(struct etnaviv_gpu *gpu, u32 value = 0; unsigned i; + lockdep_assert_held(&gpu->lock); + for (i = 0; i < gpu->identity.pixel_pipes; i++) { pipe_select(gpu, clock, i); value += gpu_read(gpu, signal->data); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 23ced5896c7c..5b67eda122db 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -11,6 +11,7 @@ #include "etnaviv_gpu.h" #include "etnaviv_sched.h" #include "state.xml.h" +#include "state_hi.xml.h" static int etnaviv_job_hang_limit = 0; module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444); @@ -35,7 +36,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job { struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); struct etnaviv_gpu *gpu = submit->gpu; - u32 dma_addr; + u32 dma_addr, primid = 0; int change; /* @@ -52,10 +53,22 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job */ dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); change = dma_addr - gpu->hangcheck_dma_addr; + if (submit->exec_state == ETNA_PIPE_3D) { + /* guard against concurrent usage from perfmon_sample */ + mutex_lock(&gpu->lock); + gpu_write(gpu, VIVS_MC_PROFILE_CONFIG0, + VIVS_MC_PROFILE_CONFIG0_FE_CURRENT_PRIM << + VIVS_MC_PROFILE_CONFIG0_FE__SHIFT); + primid = gpu_read(gpu, VIVS_MC_PROFILE_FE_READ); + mutex_unlock(&gpu->lock); + } if (gpu->state == ETNA_GPU_STATE_RUNNING && (gpu->completed_fence != gpu->hangcheck_fence || - change < 0 || change > 16)) { + change < 0 
|| change > 16 || + (submit->exec_state == ETNA_PIPE_3D && + gpu->hangcheck_primid != primid))) { gpu->hangcheck_dma_addr = dma_addr; + gpu->hangcheck_primid = primid; gpu->hangcheck_fence = gpu->completed_fence; goto out_no_timeout; } diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h index 829bc528e618..f7bc5f6e20ff 100644 --- a/drivers/gpu/drm/etnaviv/state_hi.xml.h +++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h @@ -8,17 +8,17 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng git clone git://0x04.net/rules-ng-ng The rules-ng-ng source files this header was generated from are: -- state.xml ( 29355 bytes, from 2024-01-19 10:18:54) -- common.xml ( 35664 bytes, from 2023-12-06 10:55:32) -- common_3d.xml ( 15069 bytes, from 2023-11-22 10:05:24) -- state_hi.xml ( 35854 bytes, from 2023-12-11 15:50:17) -- copyright.xml ( 1597 bytes, from 2016-11-10 13:58:32) -- state_2d.xml ( 52271 bytes, from 2023-06-02 12:35:03) -- state_3d.xml ( 89522 bytes, from 2024-01-19 10:18:54) -- state_blt.xml ( 14592 bytes, from 2023-11-22 10:05:09) -- state_vg.xml ( 5975 bytes, from 2016-11-10 13:58:32) - -Copyright (C) 2012-2023 by the following authors: +- state.xml ( 30729 bytes, from 2024-06-21 11:31:54) +- common.xml ( 35664 bytes, from 2023-12-13 09:33:18) +- common_3d.xml ( 15069 bytes, from 2023-12-13 09:33:18) +- state_hi.xml ( 35909 bytes, from 2024-06-21 11:31:54) +- copyright.xml ( 1597 bytes, from 2020-10-28 12:56:03) +- state_2d.xml ( 52271 bytes, from 2023-05-30 20:50:02) +- state_3d.xml ( 89626 bytes, from 2024-06-21 11:32:57) +- state_blt.xml ( 14592 bytes, from 2023-12-13 09:33:18) +- state_vg.xml ( 5975 bytes, from 2020-10-28 12:56:03) + +Copyright (C) 2012-2024 by the following authors: - Wladimir J. van der Laan <laanwj@gmail.com> - Christian Gmeiner <christian.gmeiner@gmail.com> - Lucas Stach <l.stach@pengutronix.de> @@ -467,6 +467,7 @@ DEALINGS IN THE SOFTWARE. 
#define VIVS_MC_PROFILE_CONFIG0 0x00000470 #define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x000000ff #define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0 +#define VIVS_MC_PROFILE_CONFIG0_FE_CURRENT_PRIM 0x00000009 #define VIVS_MC_PROFILE_CONFIG0_FE_DRAW_COUNT 0x0000000a #define VIVS_MC_PROFILE_CONFIG0_FE_OUT_VERTEX_COUNT 0x0000000b #define VIVS_MC_PROFILE_CONFIG0_FE_CACHE_MISS_COUNT 0x0000000c diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 0ef7bc8848b0..b9e206303b48 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -871,7 +871,7 @@ static void exynos5433_decon_remove(struct platform_device *pdev) struct platform_driver exynos5433_decon_driver = { .probe = exynos5433_decon_probe, - .remove_new = exynos5433_decon_remove, + .remove = exynos5433_decon_remove, .driver = { .name = "exynos5433-decon", .pm = pm_ptr(&exynos5433_decon_pm_ops), diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 0d185c0564b9..5170f72b0830 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -37,6 +37,24 @@ #define WINDOWS_NR 2 +struct decon_data { + unsigned int vidw_buf_start_base; + unsigned int shadowcon_win_protect_shift; + unsigned int wincon_burstlen_shift; +}; + +static struct decon_data exynos7_decon_data = { + .vidw_buf_start_base = 0x80, + .shadowcon_win_protect_shift = 10, + .wincon_burstlen_shift = 11, +}; + +static struct decon_data exynos7870_decon_data = { + .vidw_buf_start_base = 0x880, + .shadowcon_win_protect_shift = 8, + .wincon_burstlen_shift = 10, +}; + struct decon_context { struct device *dev; struct drm_device *drm_dev; @@ -55,11 +73,19 @@ struct decon_context { wait_queue_head_t wait_vsync_queue; atomic_t wait_vsync_event; + const struct decon_data *data; struct drm_encoder *encoder; }; static const struct of_device_id decon_driver_dt_match[] = { - {.compatible = "samsung,exynos7-decon"}, + { + .compatible = "samsung,exynos7-decon", + .data = &exynos7_decon_data, + }, + { + .compatible = "samsung,exynos7870-decon", + .data = &exynos7870_decon_data, + }, {}, }; MODULE_DEVICE_TABLE(of, decon_driver_dt_match); @@ -81,10 +107,31 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = { DRM_PLANE_TYPE_CURSOR, }; -static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) +/** + * decon_shadow_protect_win() - disable updating values from shadow registers at vsync + * + * @ctx: display and enhancement controller context + * @win: window to protect registers for + * @protect: 1 to protect (disable updates) + */ +static void decon_shadow_protect_win(struct decon_context *ctx, + unsigned int win, bool protect) { - struct decon_context *ctx = crtc->ctx; + u32 bits, val; + unsigned int shift = ctx->data->shadowcon_win_protect_shift; + + bits = SHADOWCON_WINx_PROTECT(shift, win); + + val = readl(ctx->regs + SHADOWCON); + if (protect) + val |= bits; + else + val &= ~bits; + writel(val, ctx->regs + SHADOWCON); +} +static void decon_wait_for_vblank(struct decon_context *ctx) +{ if (ctx->suspended) return; @@ -100,25 +147,33 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n"); } -static void decon_clear_channels(struct exynos_drm_crtc *crtc) +static void decon_clear_channels(struct decon_context *ctx) { - struct decon_context *ctx = crtc->ctx; unsigned int win, ch_enabled = 0; + u32 val; /* Check if any 
channel is enabled. */ for (win = 0; win < WINDOWS_NR; win++) { - u32 val = readl(ctx->regs + WINCON(win)); + val = readl(ctx->regs + WINCON(win)); if (val & WINCONx_ENWIN) { + decon_shadow_protect_win(ctx, win, true); + val &= ~WINCONx_ENWIN; writel(val, ctx->regs + WINCON(win)); ch_enabled = 1; + + decon_shadow_protect_win(ctx, win, false); } } + val = readl(ctx->regs + DECON_UPDATE); + val |= DECON_UPDATE_STANDALONE_F; + writel(val, ctx->regs + DECON_UPDATE); + /* Wait for vsync, as disable channel takes effect at next vsync */ if (ch_enabled) - decon_wait_for_vblank(ctx->crtc); + decon_wait_for_vblank(ctx); } static int decon_ctx_initialize(struct decon_context *ctx, @@ -126,7 +181,7 @@ static int decon_ctx_initialize(struct decon_context *ctx, { ctx->drm_dev = drm_dev; - decon_clear_channels(ctx->crtc); + decon_clear_channels(ctx); return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv); } @@ -140,7 +195,7 @@ static void decon_ctx_remove(struct decon_context *ctx) static u32 decon_calc_clkdiv(struct decon_context *ctx, const struct drm_display_mode *mode) { - unsigned long ideal_clk = mode->clock; + unsigned long ideal_clk = mode->clock * 1000; u32 clkdiv; /* Find the clock divider value that gets us closest to ideal_clk */ @@ -263,6 +318,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, { unsigned long val; int padding; + unsigned int shift = ctx->data->wincon_burstlen_shift; val = readl(ctx->regs + WINCON(win)); val &= ~WINCONx_BPPMODE_MASK; @@ -270,44 +326,44 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, switch (fb->format->format) { case DRM_FORMAT_RGB565: val |= WINCONx_BPPMODE_16BPP_565; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_XRGB8888: val |= WINCONx_BPPMODE_24BPP_xRGB; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_XBGR8888: val |= WINCONx_BPPMODE_24BPP_xBGR; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_RGBX8888: val |= WINCONx_BPPMODE_24BPP_RGBx; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_BGRX8888: val |= WINCONx_BPPMODE_24BPP_BGRx; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_ARGB8888: val |= WINCONx_BPPMODE_32BPP_ARGB | WINCONx_BLD_PIX | WINCONx_ALPHA_SEL; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_ABGR8888: val |= WINCONx_BPPMODE_32BPP_ABGR | WINCONx_BLD_PIX | WINCONx_ALPHA_SEL; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_RGBA8888: val |= WINCONx_BPPMODE_32BPP_RGBA | WINCONx_BLD_PIX | WINCONx_ALPHA_SEL; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_BGRA8888: default: val |= WINCONx_BPPMODE_32BPP_BGRA | WINCONx_BLD_PIX | WINCONx_ALPHA_SEL; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; } @@ -323,8 +379,8 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, padding = (fb->pitches[0] / fb->format->cpp[0]) - fb->width; if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) { - val &= ~WINCONx_BURSTLEN_MASK; - val |= WINCONx_BURSTLEN_8WORD; + val &= ~WINCONx_BURSTLEN_MASK(shift); + val |= WINCONx_BURSTLEN_8WORD(shift); } writel(val, ctx->regs + WINCON(win)); @@ -343,28 +399,6 @@ static void 
decon_win_set_colkey(struct decon_context *ctx, unsigned int win) writel(keycon1, ctx->regs + WKEYCON1_BASE(win)); } -/** - * decon_shadow_protect_win() - disable updating values from shadow registers at vsync - * - * @ctx: display and enhancement controller context - * @win: window to protect registers for - * @protect: 1 to protect (disable updates) - */ -static void decon_shadow_protect_win(struct decon_context *ctx, - unsigned int win, bool protect) -{ - u32 bits, val; - - bits = SHADOWCON_WINx_PROTECT(win); - - val = readl(ctx->regs + SHADOWCON); - if (protect) - val |= bits; - else - val &= ~bits; - writel(val, ctx->regs + SHADOWCON); -} - static void decon_atomic_begin(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -391,6 +425,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, unsigned int win = plane->index; unsigned int cpp = fb->format->cpp[0]; unsigned int pitch = fb->pitches[0]; + unsigned int vidw_addr0_base = ctx->data->vidw_buf_start_base; if (ctx->suspended) return; @@ -407,7 +442,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, /* buffer start address */ val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0); - writel(val, ctx->regs + VIDW_BUF_START(win)); + writel(val, ctx->regs + VIDW_BUF_START(vidw_addr0_base, win)); padding = (pitch / cpp) - fb->width; @@ -689,6 +724,7 @@ static int decon_probe(struct platform_device *pdev) ctx->dev = dev; ctx->suspended = true; + ctx->data = of_device_get_match_data(dev); i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings"); if (i80_if_timings) @@ -838,7 +874,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(exynos7_decon_pm_ops, exynos7_decon_suspend, struct platform_driver decon_driver = { .probe = decon_probe, - .remove_new = decon_remove, + .remove = decon_remove, .driver = { .name = "exynos-decon", .pm = pm_ptr(&exynos7_decon_pm_ops), diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index 22142b293279..5bcf41e0bd04 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c @@ -279,7 +279,7 @@ MODULE_DEVICE_TABLE(of, exynos_dp_match); struct platform_driver dp_driver = { .probe = exynos_dp_probe, - .remove_new = exynos_dp_remove, + .remove = exynos_dp_remove, .driver = { .name = "exynos-dp", .pm = pm_ptr(&exynos_dp_pm_ops), diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h index 0ed4f2b8595a..1815374c38df 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h @@ -19,9 +19,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, enum exynos_drm_output_type out_type, const struct exynos_drm_crtc_ops *ops, void *context); -void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc); -void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc, - struct exynos_drm_plane *exynos_plane); /* This function gets crtc device matched with out_type. 
*/ struct exynos_drm_crtc *exynos_drm_crtc_get_by_type(struct drm_device *drm_dev, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 2a466d8179f4..1c44f85c5f54 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -363,7 +363,7 @@ static void exynos_drm_platform_shutdown(struct platform_device *pdev) static struct platform_driver exynos_drm_platform_driver = { .probe = exynos_drm_platform_probe, - .remove_new = exynos_drm_platform_remove, + .remove = exynos_drm_platform_remove, .shutdown = exynos_drm_platform_shutdown, .driver = { .name = "exynos-drm", diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index bf16deaae68b..896a03639e2d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -181,7 +181,7 @@ MODULE_DEVICE_TABLE(of, exynos_dsi_of_match); struct platform_driver dsi_driver = { .probe = samsung_dsim_probe, - .remove_new = samsung_dsim_remove, + .remove = samsung_dsim_remove, .driver = { .name = "exynos-dsi", .pm = &samsung_dsim_pm_ops, diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 4d7ea65b7dd8..b150cfd92f6e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1408,7 +1408,7 @@ MODULE_DEVICE_TABLE(of, fimc_of_match); struct platform_driver fimc_driver = { .probe = fimc_probe, - .remove_new = fimc_remove, + .remove = fimc_remove, .driver = { .of_match_table = fimc_of_match, .name = "exynos-drm-fimc", diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index f57df8c48139..1ad87584b1c2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -1323,7 +1323,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(exynos_fimd_pm_ops, exynos_fimd_suspend, struct platform_driver fimd_driver = { .probe = fimd_probe, - .remove_new = fimd_remove, + .remove = fimd_remove, .driver = { .name = "exynos4-fb", .pm = pm_ptr(&exynos_fimd_pm_ops), diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 3a3b2c00e400..d32f2474cbaa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1607,7 +1607,7 @@ MODULE_DEVICE_TABLE(of, exynos_g2d_match); struct platform_driver g2d_driver = { .probe = g2d_probe, - .remove_new = g2d_remove, + .remove = g2d_remove, .driver = { .name = "exynos-drm-g2d", .pm = pm_ptr(&g2d_pm_ops), diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 638ca96830e9..4787fee4696f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -18,7 +18,7 @@ #include "exynos_drm_drv.h" #include "exynos_drm_gem.h" -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 59fa22050717..e6d516e1976d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1286,7 +1286,7 @@ static int gsc_probe(struct platform_device *pdev) return ret; } - /* context initailization */ + /* context initialization */ ctx->id = pdev->id; platform_set_drvdata(pdev, ctx); @@ -1420,7 +1420,7 @@ MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match); 
struct platform_driver gsc_driver = { .probe = gsc_probe, - .remove_new = gsc_remove, + .remove = gsc_remove, .driver = { .name = "exynos-drm-gsc", .pm = &gsc_pm_ops, diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index d61ec451807c..b34ec6728337 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -460,7 +460,7 @@ MODULE_DEVICE_TABLE(of, exynos_mic_of_match); struct platform_driver mic_driver = { .probe = exynos_mic_probe, - .remove_new = exynos_mic_remove, + .remove = exynos_mic_remove, .driver = { .name = "exynos-mic", .pm = pm_ptr(&exynos_mic_pm_ops), diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 2eb0b701672f..7b0f4a98a70a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -451,7 +451,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(rotator_pm_ops, rotator_runtime_suspend, struct platform_driver rotator_driver = { .probe = rotator_probe, - .remove_new = rotator_remove, + .remove = rotator_remove, .driver = { .name = "exynos-rotator", .pm = pm_ptr(&rotator_pm_ops), diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index 2788105ac780..c8a1b6b0a29c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -719,7 +719,7 @@ MODULE_DEVICE_TABLE(of, exynos_scaler_match); struct platform_driver scaler_driver = { .probe = scaler_probe, - .remove_new = scaler_remove, + .remove = scaler_remove, .driver = { .name = "exynos-scaler", .pm = pm_ptr(&scaler_pm_ops), diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 6de0cced6c9d..fd388b1dbe68 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -467,7 +467,7 @@ static void vidi_remove(struct platform_device *pdev) struct platform_driver vidi_driver = { .probe = vidi_probe, - .remove_new = vidi_remove, + .remove = vidi_remove, .driver = { .name = "exynos-drm-vidi", .dev_groups = vidi_groups, diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index c9d4b9146df9..466a9e514aa1 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -2126,7 +2126,7 @@ static const struct dev_pm_ops exynos_hdmi_pm_ops = { struct platform_driver hdmi_driver = { .probe = hdmi_probe, - .remove_new = hdmi_remove, + .remove = hdmi_remove, .driver = { .name = "exynos-hdmi", .pm = &exynos_hdmi_pm_ops, diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 1db955f00044..a3670d2eaab2 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1335,5 +1335,5 @@ struct platform_driver mixer_driver = { .of_match_table = mixer_match_types, }, .probe = mixer_probe, - .remove_new = mixer_remove, + .remove = mixer_remove, }; diff --git a/drivers/gpu/drm/exynos/regs-decon7.h b/drivers/gpu/drm/exynos/regs-decon7.h index 5bc5f1db5196..216c106dac8f 100644 --- a/drivers/gpu/drm/exynos/regs-decon7.h +++ b/drivers/gpu/drm/exynos/regs-decon7.h @@ -48,7 +48,7 @@ /* SHADOWCON */ #define SHADOWCON 0x30 -#define SHADOWCON_WINx_PROTECT(_win) (1 << (10 + (_win))) +#define SHADOWCON_WINx_PROTECT(_shf, _win) (1 << ((_shf) + (_win))) /* WINCONx */ #define WINCON(_win) (0x50 + ((_win) * 4)) @@ -58,10 +58,9 @@ #define WINCONx_BUFSEL_SHIFT 28 #define 
WINCONx_TRIPLE_BUF_MODE (0x1 << 18) #define WINCONx_DOUBLE_BUF_MODE (0x0 << 18) -#define WINCONx_BURSTLEN_16WORD (0x0 << 11) -#define WINCONx_BURSTLEN_8WORD (0x1 << 11) -#define WINCONx_BURSTLEN_MASK (0x1 << 11) -#define WINCONx_BURSTLEN_SHIFT 11 +#define WINCONx_BURSTLEN_16WORD(_shf) (0x0 << (_shf)) +#define WINCONx_BURSTLEN_8WORD(_shf) (0x1 << (_shf)) +#define WINCONx_BURSTLEN_MASK(_shf) (0x1 << (_shf)) #define WINCONx_BLD_PLANE (0 << 8) #define WINCONx_BLD_PIX (1 << 8) #define WINCONx_ALPHA_MUL (1 << 7) @@ -89,9 +88,9 @@ #define VIDOSD_H(_x) (0x80 + ((_x) * 4)) /* Frame buffer start addresses: VIDWxxADD0n */ -#define VIDW_BUF_START(_win) (0x80 + ((_win) * 0x10)) -#define VIDW_BUF_START1(_win) (0x84 + ((_win) * 0x10)) -#define VIDW_BUF_START2(_win) (0x88 + ((_win) * 0x10)) +#define VIDW_BUF_START(_base, _win) ((_base) + ((_win) * 0x10)) +#define VIDW_BUF_START1(_base, _win) ((_base) + ((_win) * 0x10)) +#define VIDW_BUF_START2(_base, _win) ((_base) + ((_win) * 0x10)) #define VIDW_WHOLE_X(_win) (0x0130 + ((_win) * 8)) #define VIDW_WHOLE_Y(_win) (0x0134 + ((_win) * 8)) diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig index 588ce2420af2..0e0f910ceb9f 100644 --- a/drivers/gpu/drm/fsl-dcu/Kconfig +++ b/drivers/gpu/drm/fsl-dcu/Kconfig @@ -9,6 +9,7 @@ config DRM_FSL_DCU select DRM_PANEL select REGMAP_MMIO select VIDEOMODE_HELPERS + select MFD_SYSCON if SOC_LS1021A help Choose this option if you have a Freescale DCU chipset. If M is selected the module will be called fsl-dcu-drm. diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 2b3f15f0dc56..be1ab673e49e 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -101,12 +101,25 @@ static void fsl_dcu_irq_uninstall(struct drm_device *dev) static int fsl_dcu_load(struct drm_device *dev, unsigned long flags) { struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + struct regmap *scfg; int ret; ret = fsl_dcu_drm_modeset_init(fsl_dev); - if (ret < 0) { - dev_err(dev->dev, "failed to initialize mode setting\n"); - return ret; + if (ret < 0) + return dev_err_probe(dev->dev, ret, "failed to initialize mode setting\n"); + + scfg = syscon_regmap_lookup_by_compatible("fsl,ls1021a-scfg"); + if (PTR_ERR(scfg) != -ENODEV) { + /* + * For simplicity, enable the PIXCLK unconditionally, + * resulting in increased power consumption. Disabling + * the clock in PM or on unload could be implemented as + * a future improvement.
+ */ + ret = regmap_update_bits(scfg, SCFG_PIXCLKCR, SCFG_PIXCLKCR_PXCEN, + SCFG_PIXCLKCR_PXCEN); + if (ret < 0) + return dev_err_probe(dev->dev, ret, "failed to enable pixclk\n"); } ret = drm_vblank_init(dev, dev->mode_config.num_crtc); @@ -274,10 +287,8 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) } fsl_dev->irq = platform_get_irq(pdev, 0); - if (fsl_dev->irq < 0) { - dev_err(dev, "failed to get irq\n"); + if (fsl_dev->irq < 0) return fsl_dev->irq; - } fsl_dev->regmap = devm_regmap_init_mmio(dev, base, &fsl_dcu_regmap_config); @@ -367,7 +378,7 @@ static void fsl_dcu_drm_shutdown(struct platform_device *pdev) static struct platform_driver fsl_dcu_drm_platform_driver = { .probe = fsl_dcu_drm_probe, - .remove_new = fsl_dcu_drm_remove, + .remove = fsl_dcu_drm_remove, .shutdown = fsl_dcu_drm_shutdown, .driver = { .name = "fsl-dcu", diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h index e2049a0e8a92..566396013c04 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h @@ -160,6 +160,9 @@ #define FSL_DCU_ARGB4444 12 #define FSL_DCU_YUV422 14 +#define SCFG_PIXCLKCR 0x28 +#define SCFG_PIXCLKCR_PXCEN BIT(31) + #define VF610_LAYER_REG_NUM 9 #define LS1021A_LAYER_REG_NUM 10 diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c index 9eb5abaf7d66..49bbd00c77ae 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c @@ -29,7 +29,7 @@ void fsl_tcon_bypass_enable(struct fsl_tcon *tcon) FSL_TCON_CTRL1_TCON_BYPASS); } -static struct regmap_config fsl_tcon_regmap_config = { +static const struct regmap_config fsl_tcon_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig index c2927c37c40b..aa2ea128aa2f 100644 --- a/drivers/gpu/drm/gma500/Kconfig +++ b/drivers/gpu/drm/gma500/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_GMA500 tristate "Intel GMA500/600/3600/3650 KMS Framebuffer" - depends on DRM && PCI && X86 && MMU + depends on DRM && PCI && X86 && MMU && HAS_IOPORT select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 866a9ecaf8b9..8c488c98ac97 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -11,10 +11,10 @@ * Jianhua Li <lijianhua@huawei.com> */ +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> @@ -308,7 +308,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev, struct drm_device *dev; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &hibmc_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, hibmc_driver.name); if (ret) return ret; diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c index a39cc549c20b..2eea9fb0e76b 100644 --- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c +++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c @@ -889,7 +889,7 @@ MODULE_DEVICE_TABLE(of, dsi_of_match); static struct platform_driver dsi_driver = { .probe = dsi_probe, - .remove_new = dsi_remove, + .remove = dsi_remove, .driver = { .name = "dw-dsi", .of_match_table = 
dsi_of_match, diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 86a3a1faff49..b3ab944652a6 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -302,7 +302,7 @@ MODULE_DEVICE_TABLE(of, kirin_drm_dt_ids); static struct platform_driver kirin_drm_platform_driver = { .probe = kirin_drm_platform_probe, - .remove_new = kirin_drm_platform_remove, + .remove = kirin_drm_platform_remove, .shutdown = kirin_drm_platform_shutdown, .driver = { .name = "kirin-drm", diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index 3077ce5470f6..e0953777a206 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -3,12 +3,12 @@ * Copyright 2021 Microsoft */ +#include <linux/aperture.h> #include <linux/efi.h> #include <linux/hyperv.h> #include <linux/module.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> @@ -126,7 +126,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev, goto err_hv_set_drv_data; } - drm_aperture_remove_framebuffers(&hyperv_driver); + aperture_remove_all_conflicting_devices(hyperv_driver.name); ret = hyperv_setup_vram(hv, hdev); if (ret) diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 42fbddb60a01..5e939004b646 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -10,6 +10,7 @@ config DRM_I915 # the shmem_readpage() which depends upon tmpfs select SHMEM select TMPFS + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HDCP_HELPER diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 35742d7a4856..a7f4b0ce0740 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -11,7 +11,7 @@ #include <acpi/video.h> #include <drm/display/drm_dp_mst_helper.h> #include <drm/drm_atomic_helper.h> -#include <drm/drm_client.h> +#include <drm/drm_client_event.h> #include <drm/drm_mode_config.h> #include <drm/drm_privacy_screen_consumer.h> #include <drm/drm_probe_helper.h> diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index e9441bb3d785..6a7060889f40 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -519,6 +519,19 @@ bool intel_fb_needs_64k_phys(u64 modifier) INTEL_PLANE_CAP_NEED64K_PHYS); } +/** + * intel_fb_is_tile4_modifier: Check if a modifier is a tile4 modifier type + * @modifier: Modifier to check + * + * Returns: + * Returns %true if @modifier is a tile4 modifier. 
+ */ +bool intel_fb_is_tile4_modifier(u64 modifier) +{ + return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, + INTEL_PLANE_CAP_TILING_4); +} + static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md, u8 display_ver_from, u8 display_ver_until) { diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h index 8240febff84c..d78993e5eb62 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fb.h @@ -36,6 +36,7 @@ bool intel_fb_is_ccs_modifier(u64 modifier); bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier); bool intel_fb_is_mc_ccs_modifier(u64 modifier); bool intel_fb_needs_64k_phys(u64 modifier); +bool intel_fb_is_tile4_modifier(u64 modifier); bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane); int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index b9252baec6b2..4c7bcf6806ff 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -1634,6 +1634,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, return -EINVAL; } + /* + * Display20 onward tile4 hflip is not supported + */ + if (rotation & DRM_MODE_REFLECT_X && + intel_fb_is_tile4_modifier(fb->modifier) && + DISPLAY_VER(dev_priv) >= 20) { + drm_dbg_kms(&dev_priv->drm, + "horizontal flip is not supported with tile4 surface formats\n"); + return -EINVAL; + } + if (drm_rotation_90_or_270(rotation)) { if (!intel_fb_supports_90_270_rotation(to_intel_framebuffer(fb))) { drm_dbg_kms(&dev_priv->drm, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 1df74f7aa3dc..9473050ac842 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -16,7 +16,7 @@ #include "i915_gem_object.h" #include "i915_scatterlist.h" -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); I915_SELFTEST_DECLARE(static bool force_different_devices;) diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c index 8fe0499308ff..4904d0f4162c 100644 --- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c @@ -169,7 +169,7 @@ static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs, return cs; } -u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs) +u32 *gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs) { return __gen2_emit_breadcrumb(rq, cs, 16, 8); } @@ -248,7 +248,7 @@ int i830_emit_bb_start(struct i915_request *rq, return 0; } -int gen3_emit_bb_start(struct i915_request *rq, +int gen2_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) { @@ -292,29 +292,12 @@ int gen4_emit_bb_start(struct i915_request *rq, void gen2_irq_enable(struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = engine->i915; - - i915->irq_mask &= ~engine->irq_enable_mask; - intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask); - ENGINE_POSTING_READ16(engine, RING_IMR); -} - -void gen2_irq_disable(struct intel_engine_cs *engine) -{ - struct drm_i915_private *i915 = engine->i915; - - i915->irq_mask |= engine->irq_enable_mask; - intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask); -} - -void gen3_irq_enable(struct intel_engine_cs *engine) -{ engine->i915->irq_mask &= 
~engine->irq_enable_mask; intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask); intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR); } -void gen3_irq_disable(struct intel_engine_cs *engine) +void gen2_irq_disable(struct intel_engine_cs *engine) { engine->i915->irq_mask |= engine->irq_enable_mask; intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask); diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h index a5cd64a65c9e..7b37560fc356 100644 --- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h +++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h @@ -15,13 +15,13 @@ int gen2_emit_flush(struct i915_request *rq, u32 mode); int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode); int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode); -u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs); +u32 *gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs); u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs); int i830_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags); -int gen3_emit_bb_start(struct i915_request *rq, +int gen2_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags); int gen4_emit_bb_start(struct i915_request *rq, @@ -30,8 +30,6 @@ int gen4_emit_bb_start(struct i915_request *rq, void gen2_irq_enable(struct intel_engine_cs *engine); void gen2_irq_disable(struct intel_engine_cs *engine); -void gen3_irq_enable(struct intel_engine_cs *engine); -void gen3_irq_disable(struct intel_engine_cs *engine); void gen5_irq_enable(struct intel_engine_cs *engine); void gen5_irq_disable(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h index a8eac59e3779..1c4784cb296c 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h @@ -15,6 +15,7 @@ #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 #define HEAD_ADDR 0x001FFFFC +#define HEAD_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */ #define RING_START(base) _MMIO((base) + 0x38) #define RING_CTL(base) _MMIO((base) + 0x3c) #define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */ @@ -26,7 +27,6 @@ #define RING_VALID_MASK 0x00000001 #define RING_VALID 0x00000001 #define RING_INVALID 0x00000000 -#define RING_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */ #define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ #define RING_SYNC_0(base) _MMIO((base) + 0x40) diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index d6674aec82fc..c4a351ebf395 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -302,7 +302,7 @@ static void gen6_check_faults(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; - u32 fault; + unsigned long fault; for_each_engine(engine, gt, id) { fault = GEN6_RING_FAULT_REG_READ(engine); @@ -310,8 +310,8 @@ static void gen6_check_faults(struct intel_gt *gt) gt_dbg(gt, "Unexpected fault\n" "\tAddr: 0x%08lx\n" "\tAddress space: %s\n" - "\tSource ID: %d\n" - "\tType: %d\n", + "\tSource ID: %ld\n" + "\tType: %ld\n", fault & PAGE_MASK, fault & RING_FAULT_GTTSEL_MASK ? 
"GGTT" : "PPGTT", diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c index 8d08b38874ef..b635aa2820d9 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c @@ -431,7 +431,7 @@ static int llc_show(struct seq_file *m, void *data) max_gpu_freq /= GEN9_FREQ_SCALER; } - seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); + seq_puts(m, "GPU freq (MHz)\tEffective GPU freq (MHz)\tEffective Ring freq (MHz)\n"); wakeref = intel_runtime_pm_get(gt->uncore->rpm); for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 72277bc8322e..32f3b52a183a 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -192,6 +192,7 @@ static bool stop_ring(struct intel_engine_cs *engine) static int xcs_resume(struct intel_engine_cs *engine) { struct intel_ring *ring = engine->legacy.ring; + ktime_t kt; ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n", ring->head, ring->tail); @@ -230,9 +231,27 @@ static int xcs_resume(struct intel_engine_cs *engine) set_pp_dir(engine); /* First wake the ring up to an empty/idle ring */ - ENGINE_WRITE_FW(engine, RING_HEAD, ring->head); + for ((kt) = ktime_get() + (2 * NSEC_PER_MSEC); + ktime_before(ktime_get(), (kt)); cpu_relax()) { + /* + * In case of resets fails because engine resumes from + * incorrect RING_HEAD and then GPU may be then fed + * to invalid instrcutions, which may lead to unrecoverable + * hang. So at first write doesn't succeed then try again. + */ + ENGINE_WRITE_FW(engine, RING_HEAD, ring->head); + if (ENGINE_READ_FW(engine, RING_HEAD) == ring->head) + break; + } + ENGINE_WRITE_FW(engine, RING_TAIL, ring->head); - ENGINE_POSTING_READ(engine, RING_TAIL); + if (ENGINE_READ_FW(engine, RING_HEAD) != ENGINE_READ_FW(engine, RING_TAIL)) { + ENGINE_TRACE(engine, "failed to reset empty ring: [%x, %x]: %x\n", + ENGINE_READ_FW(engine, RING_HEAD), + ENGINE_READ_FW(engine, RING_TAIL), + ring->head); + goto err; + } ENGINE_WRITE_FW(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID); @@ -241,12 +260,16 @@ static int xcs_resume(struct intel_engine_cs *engine) if (__intel_wait_for_register_fw(engine->uncore, RING_CTL(engine->mmio_base), RING_VALID, RING_VALID, - 5000, 0, NULL)) + 5000, 0, NULL)) { + ENGINE_TRACE(engine, "failed to restart\n"); goto err; + } - if (GRAPHICS_VER(engine->i915) > 2) + if (GRAPHICS_VER(engine->i915) > 2) { ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); + ENGINE_POSTING_READ(engine, RING_MI_MODE); + } /* Now awake, let it get started */ if (ring->tail != ring->head) { @@ -1090,9 +1113,6 @@ static void setup_irq(struct intel_engine_cs *engine) } else if (GRAPHICS_VER(i915) >= 5) { engine->irq_enable = gen5_irq_enable; engine->irq_disable = gen5_irq_disable; - } else if (GRAPHICS_VER(i915) >= 3) { - engine->irq_enable = gen3_irq_enable; - engine->irq_disable = gen3_irq_disable; } else { engine->irq_enable = gen2_irq_enable; engine->irq_disable = gen2_irq_disable; @@ -1146,7 +1166,7 @@ static void setup_common(struct intel_engine_cs *engine) * equivalent to our next initial bread so we can elide * engine->emit_init_breadcrumb(). 
*/ - engine->emit_fini_breadcrumb = gen3_emit_breadcrumb; + engine->emit_fini_breadcrumb = gen2_emit_breadcrumb; if (GRAPHICS_VER(i915) == 5) engine->emit_fini_breadcrumb = gen5_emit_breadcrumb; @@ -1159,7 +1179,7 @@ static void setup_common(struct intel_engine_cs *engine) else if (IS_I830(i915) || IS_I845G(i915)) engine->emit_bb_start = i830_emit_bb_start; else - engine->emit_bb_start = gen3_emit_bb_start; + engine->emit_bb_start = gen2_emit_bb_start; } static void setup_rcs(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c index 1fb6ff77fd89..bb696b29ee2c 100644 --- a/drivers/gpu/drm/i915/gt/shmem_utils.c +++ b/drivers/gpu/drm/i915/gt/shmem_utils.c @@ -40,7 +40,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj) if (i915_gem_object_is_shmem(obj)) { file = obj->base.filp; - atomic_long_inc(&file->f_count); + get_file(file); return file; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 097fc6bd1285..5949ff0b0161 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -239,8 +239,16 @@ static u32 guc_ctl_debug_flags(struct intel_guc *guc) static u32 guc_ctl_feature_flags(struct intel_guc *guc) { + struct intel_gt *gt = guc_to_gt(guc); u32 flags = 0; + /* + * Enable PXP GuC autoteardown flow. + * NB: MTL does things differently. + */ + if (HAS_PXP(gt->i915) && !IS_METEORLAKE(gt->i915)) + flags |= GUC_CTL_ENABLE_GUC_PXP_CTL; + if (!intel_guc_submission_is_used(guc)) flags |= GUC_CTL_DISABLE_SCHEDULER; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 263c9c3f6a03..4ce6e2332a63 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -105,6 +105,7 @@ #define GUC_WA_ENABLE_TSC_CHECK_ON_RC6 BIT(22) #define GUC_CTL_FEATURE 2 +#define GUC_CTL_ENABLE_GUC_PXP_CTL BIT(1) #define GUC_CTL_ENABLE_SLPC BIT(2) #define GUC_CTL_DISABLE_SCHEDULER BIT(14) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index ca0fb126b02d..b27ff77bfb50 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -53,8 +53,8 @@ #include "intel_gvt.h" #include "gvt.h" -MODULE_IMPORT_NS(DMA_BUF); -MODULE_IMPORT_NS(I915_GVT); +MODULE_IMPORT_NS("DMA_BUF"); +MODULE_IMPORT_NS("I915_GVT"); /* helper macros copied from vfio-pci */ #define VFIO_PCI_OFFSET_SHIFT 40 diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 482b6ea05048..bcf854dc93b4 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -27,6 +27,7 @@ * */ +#include <linux/aperture.h> #include <linux/acpi.h> #include <linux/device.h> #include <linux/module.h> @@ -39,7 +40,6 @@ #include <linux/vga_switcheroo.h> #include <linux/vt.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> @@ -494,7 +494,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (ret) goto err_perf; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver); + ret = aperture_remove_conflicting_pci_devices(pdev, dev_priv->drm.driver->name); if (ret) goto err_ggtt; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4131a0dbeb3d..dcf6050e5550 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ 
-670,6 +670,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_RPS(i915) (INTEL_INFO(i915)->has_rps) +#define HAS_PXP(i915) \ + (IS_ENABLED(CONFIG_DRM_I915_PXP) && INTEL_INFO(i915)->has_pxp) + #define HAS_HECI_PXP(i915) \ (INTEL_INFO(i915)->has_heci_pxp) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index fee80a2df245..71c0daef1996 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1104,7 +1104,7 @@ i915_vma_coredump_create(const struct intel_gt *gt, } INIT_LIST_HEAD(&dst->page_list); - strcpy(dst->name, name); + strscpy(dst->name, name); dst->next = NULL; dst->gtt_offset = vma_res->start; @@ -1404,7 +1404,7 @@ static bool record_context(struct i915_gem_context_coredump *e, rcu_read_lock(); task = pid_task(ctx->pid, PIDTYPE_PID); if (task) { - strcpy(e->comm, task->comm); + strscpy(e->comm, task->comm); e->pid = task->pid; } rcu_read_unlock(); @@ -1450,7 +1450,7 @@ capture_vma_snapshot(struct intel_engine_capture_vma *next, return next; } - strcpy(c->name, name); + strscpy(c->name, name); c->vma_res = i915_vma_resource_get(vma_res); c->next = next; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 21eb0c5b320d..93fbf53578da 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -356,7 +356,7 @@ static bool exclusive_mmio_access(const struct drm_i915_private *i915) return GRAPHICS_VER(i915) == 7; } -static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) +static void gen3_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) { struct intel_engine_pmu *pmu = &engine->pmu; bool busy; @@ -391,6 +391,31 @@ static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); } +static void gen2_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) +{ + struct intel_engine_pmu *pmu = &engine->pmu; + u32 tail, head, acthd; + + tail = ENGINE_READ_FW(engine, RING_TAIL); + head = ENGINE_READ_FW(engine, RING_HEAD); + acthd = ENGINE_READ_FW(engine, ACTHD); + + if (head & HEAD_WAIT_I8XX) + add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); + + if (head & HEAD_WAIT_I8XX || head != acthd || + (head & HEAD_ADDR) != (tail & TAIL_ADDR)) + add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); +} + +static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) +{ + if (GRAPHICS_VER(engine->i915) >= 3) + gen3_engine_sample(engine, period_ns); + else + gen2_engine_sample(engine, period_ns); +} + static void engines_sample(struct intel_gt *gt, unsigned int period_ns) { @@ -834,15 +859,14 @@ static void i915_pmu_event_start(struct perf_event *event, int flags) static void i915_pmu_event_stop(struct perf_event *event, int flags) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); - struct i915_pmu *pmu = &i915->pmu; + struct i915_pmu *pmu = event_to_pmu(event); if (pmu->closed) goto out; if (flags & PERF_EF_UPDATE) i915_pmu_event_read(event); + i915_pmu_disable(event); out: @@ -1232,17 +1256,6 @@ static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu) cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node); } -static bool is_igp(struct drm_i915_private *i915) -{ - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); - - /* IGP is 0000:00:02.0 */ - return pci_domain_nr(pdev->bus) == 0 && - pdev->bus->number == 0 && - PCI_SLOT(pdev->devfn) == 2 && - 
PCI_FUNC(pdev->devfn) == 0; -} - void i915_pmu_register(struct drm_i915_private *i915) { struct i915_pmu *pmu = &i915->pmu; @@ -1255,18 +1268,13 @@ void i915_pmu_register(struct drm_i915_private *i915) int ret = -ENOMEM; - if (GRAPHICS_VER(i915) <= 2) { - drm_info(&i915->drm, "PMU not supported for this GPU."); - return; - } - spin_lock_init(&pmu->lock); hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); pmu->timer.function = i915_sample; pmu->cpuhp.cpu = -1; init_rc6(pmu); - if (!is_igp(i915)) { + if (IS_DGFX(i915)) { pmu->name = kasprintf(GFP_KERNEL, "i915_%s", dev_name(i915->drm.dev)); @@ -1318,7 +1326,7 @@ err_attr: pmu->base.event_init = NULL; free_event_attributes(pmu); err_name: - if (!is_igp(i915)) + if (IS_DGFX(i915)) kfree(pmu->name); err: drm_notice(&i915->drm, "Failed to register PMU!\n"); @@ -1346,7 +1354,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915) perf_pmu_unregister(&pmu->base); pmu->base.event_init = NULL; kfree(pmu->base.attr_groups); - if (!is_igp(i915)) + if (IS_DGFX(i915)) kfree(pmu->name); free_event_attributes(pmu); } diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 519e096c607c..8f62cfa23fb7 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -273,11 +273,6 @@ i915_request_active_engine(struct i915_request *rq, return ret; } -static void __rq_init_watchdog(struct i915_request *rq) -{ - rq->watchdog.timer.function = NULL; -} - static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer) { struct i915_request *rq = @@ -294,6 +289,14 @@ static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer) return HRTIMER_NORESTART; } +static void __rq_init_watchdog(struct i915_request *rq) +{ + struct i915_request_watchdog *wdg = &rq->watchdog; + + hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + wdg->timer.function = __rq_watchdog_expired; +} + static void __rq_arm_watchdog(struct i915_request *rq) { struct i915_request_watchdog *wdg = &rq->watchdog; @@ -304,8 +307,6 @@ static void __rq_arm_watchdog(struct i915_request *rq) i915_request_get(rq); - hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - wdg->timer.function = __rq_watchdog_expired; hrtimer_start_range_ns(&wdg->timer, ns_to_ktime(ce->watchdog.timeout_us * NSEC_PER_USEC), @@ -317,7 +318,7 @@ static void __rq_cancel_watchdog(struct i915_request *rq) { struct i915_request_watchdog *wdg = &rq->watchdog; - if (wdg->timer.function && hrtimer_try_to_cancel(&wdg->timer) > 0) + if (hrtimer_try_to_cancel(&wdg->timer) > 0) i915_request_put(rq); } diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 5a01d60e5186..a5383a2bc64b 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -204,7 +204,7 @@ int intel_gvt_set_ops(const struct intel_vgpu_ops *ops) return 0; } -EXPORT_SYMBOL_NS_GPL(intel_gvt_set_ops, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_gvt_set_ops, "I915_GVT"); void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops) { @@ -222,7 +222,7 @@ void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops) intel_gvt_ops = NULL; mutex_unlock(&intel_gvt_mutex); } -EXPORT_SYMBOL_NS_GPL(intel_gvt_clear_ops, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_gvt_clear_ops, "I915_GVT"); /** * intel_gvt_init - initialize GVT components @@ -284,40 +284,40 @@ void intel_gvt_resume(struct drm_i915_private *dev_priv) * Exported here so that the exports only get created when GVT support is * actually enabled. 
*/ -EXPORT_SYMBOL_NS_GPL(i915_gem_object_alloc, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_gem_object_create_shmem, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_gem_object_init, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_gem_object_ggtt_pin_ww, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_gem_object_pin_map, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_gem_object_set_to_cpu_domain, I915_GVT); -EXPORT_SYMBOL_NS_GPL(__i915_gem_object_flush_map, I915_GVT); -EXPORT_SYMBOL_NS_GPL(__i915_gem_object_set_pages, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_gem_gtt_insert, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_gem_prime_export, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_init, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_backoff, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_fini, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_ppgtt_create, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_request_add, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_request_create, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_request_wait, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_reserve_fence, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_unreserve_fence, I915_GVT); -EXPORT_SYMBOL_NS_GPL(i915_vm_release, I915_GVT); -EXPORT_SYMBOL_NS_GPL(_i915_vma_move_to_active, I915_GVT); -EXPORT_SYMBOL_NS_GPL(intel_context_create, I915_GVT); -EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT); -EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT); -EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT); -EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_alloc, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_create_shmem, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_init, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_ggtt_pin_ww, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_pin_map, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_set_to_cpu_domain, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(__i915_gem_object_flush_map, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(__i915_gem_object_set_pages, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_gem_gtt_insert, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_gem_prime_export, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_init, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_backoff, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_fini, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_ppgtt_create, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_request_add, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_request_create, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_request_wait, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_reserve_fence, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_unreserve_fence, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_vm_release, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(_i915_vma_move_to_active, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(intel_context_create, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(intel_ring_begin, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, "I915_GVT"); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) -EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put, "I915_GVT"); #endif -EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT); -EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT); -EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT); -EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, I915_GVT); -EXPORT_SYMBOL_NS_GPL(shmem_pin_map, I915_GVT); -EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, I915_GVT); -EXPORT_SYMBOL_NS_GPL(__px_dma, I915_GVT); 
-EXPORT_SYMBOL_NS_GPL(i915_fence_ops, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(shmem_pin_map, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(__px_dma, "I915_GVT"); +EXPORT_SYMBOL_NS_GPL(i915_fence_ops, "I915_GVT"); diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c index 63849389f39a..73333b846125 100644 --- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c +++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c @@ -1309,4 +1309,4 @@ int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter) err: return ret; } -EXPORT_SYMBOL_NS_GPL(intel_gvt_iterate_mmio_table, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_gvt_iterate_mmio_table, "I915_GVT"); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index da3577149769..9cf169665d7c 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -170,7 +170,7 @@ static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i9 static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_private *i915) { - if (!IS_ENABLED(CONFIG_DRM_I915_PXP) || !INTEL_INFO(i915)->has_pxp) + if (!HAS_PXP(i915)) return NULL; /* diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 61da4ed9d521..0727492576be 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -4,7 +4,7 @@ * Copyright © 2018 Intel Corporation */ -#include <linux/random.h> +#include <linux/prandom.h> #include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h index 05364eca20f7..70330a2e80f2 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.h +++ b/drivers/gpu/drm/i915/selftests/i915_random.h @@ -26,7 +26,7 @@ #define __I915_SELFTESTS_RANDOM_H__ #include <linux/math64.h> -#include <linux/random.h> +#include <linux/prandom.h> #include "../i915_selftest.h" diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c index 805c4bfb85fe..7e59591bbed6 100644 --- a/drivers/gpu/drm/i915/selftests/scatterlist.c +++ b/drivers/gpu/drm/i915/selftests/scatterlist.c @@ -22,7 +22,7 @@ */ #include <linux/prime_numbers.h> -#include <linux/random.h> +#include <linux/prandom.h> #include "i915_selftest.h" #include "i915_utils.h" diff --git a/drivers/gpu/drm/imagination/pvr_context.c b/drivers/gpu/drm/imagination/pvr_context.c index 98327f9bbd9c..5edc3c01af72 100644 --- a/drivers/gpu/drm/imagination/pvr_context.c +++ b/drivers/gpu/drm/imagination/pvr_context.c @@ -17,10 +17,14 @@ #include <drm/drm_auth.h> #include <drm/drm_managed.h> + +#include <linux/bug.h> #include <linux/errno.h> #include <linux/kernel.h> +#include <linux/list.h> #include <linux/sched.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/xarray.h> @@ -342,6 +346,10 @@ int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_co return err; } + spin_lock(&pvr_dev->ctx_list_lock); + list_add_tail(&ctx->file_link, &pvr_file->contexts); + 
spin_unlock(&pvr_dev->ctx_list_lock); + return 0; err_destroy_fw_obj: @@ -368,6 +376,11 @@ pvr_context_release(struct kref *ref_count) container_of(ref_count, struct pvr_context, ref_count); struct pvr_device *pvr_dev = ctx->pvr_dev; + WARN_ON(in_interrupt()); + spin_lock(&pvr_dev->ctx_list_lock); + list_del(&ctx->file_link); + spin_unlock(&pvr_dev->ctx_list_lock); + xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id); pvr_context_destroy_queues(ctx); pvr_fw_object_destroy(ctx->fw_obj); @@ -425,11 +438,30 @@ pvr_context_destroy(struct pvr_file *pvr_file, u32 handle) */ void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file) { + struct pvr_device *pvr_dev = pvr_file->pvr_dev; struct pvr_context *ctx; unsigned long handle; xa_for_each(&pvr_file->ctx_handles, handle, ctx) pvr_context_destroy(pvr_file, handle); + + spin_lock(&pvr_dev->ctx_list_lock); + ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link); + + while (!list_entry_is_head(ctx, &pvr_file->contexts, file_link)) { + list_del_init(&ctx->file_link); + + if (pvr_context_get_if_referenced(ctx)) { + spin_unlock(&pvr_dev->ctx_list_lock); + + pvr_vm_unmap_all(ctx->vm_ctx); + + pvr_context_put(ctx); + spin_lock(&pvr_dev->ctx_list_lock); + } + ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link); + } + spin_unlock(&pvr_dev->ctx_list_lock); } /** @@ -439,6 +471,7 @@ void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file) void pvr_context_device_init(struct pvr_device *pvr_dev) { xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1); + spin_lock_init(&pvr_dev->ctx_list_lock); } /** diff --git a/drivers/gpu/drm/imagination/pvr_context.h b/drivers/gpu/drm/imagination/pvr_context.h index 0c7b97dfa6ba..07afa179cdf4 100644 --- a/drivers/gpu/drm/imagination/pvr_context.h +++ b/drivers/gpu/drm/imagination/pvr_context.h @@ -85,6 +85,9 @@ struct pvr_context { /** @compute: Transfer queue. */ struct pvr_queue *transfer; } queues; + + /** @file_link: pvr_file PVR context list link. */ + struct list_head file_link; }; static __always_inline struct pvr_queue * @@ -124,6 +127,24 @@ pvr_context_get(struct pvr_context *ctx) } /** + * pvr_context_get_if_referenced() - Take an additional reference on a still + * referenced context. + * @ctx: Context pointer. + * + * Call pvr_context_put() to release. + * + * Returns: + * * True on success, or + * * false if no context pointer passed, or the context wasn't still + * * referenced. + */ +static __always_inline bool +pvr_context_get_if_referenced(struct pvr_context *ctx) +{ + return ctx != NULL && kref_get_unless_zero(&ctx->ref_count) != 0; +} + +/** * pvr_context_lookup() - Lookup context pointer from handle and file. * @pvr_file: Pointer to pvr_file structure. * @handle: Context handle. diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h index b574e23d484b..6d0dfacb677b 100644 --- a/drivers/gpu/drm/imagination/pvr_device.h +++ b/drivers/gpu/drm/imagination/pvr_device.h @@ -23,6 +23,7 @@ #include <linux/kernel.h> #include <linux/math.h> #include <linux/mutex.h> +#include <linux/spinlock_types.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/wait.h> @@ -293,6 +294,12 @@ struct pvr_device { /** @sched_wq: Workqueue for schedulers. */ struct workqueue_struct *sched_wq; + + /** + * @ctx_list_lock: Lock to be held when accessing the context list in + * struct pvr_file. + */ + spinlock_t ctx_list_lock; }; /** @@ -344,6 +351,9 @@ struct pvr_file { * This array is used to allocate handles returned to userspace. 
*/ struct xarray vm_ctx_handles; + + /** @contexts: PVR context list. */ + struct list_head contexts; }; /** diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c index 684a9b9a2247..85ee9abd1811 100644 --- a/drivers/gpu/drm/imagination/pvr_drv.c +++ b/drivers/gpu/drm/imagination/pvr_drv.c @@ -28,6 +28,7 @@ #include <linux/export.h> #include <linux/fs.h> #include <linux/kernel.h> +#include <linux/list.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/moduleparam.h> @@ -1326,6 +1327,8 @@ pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file) */ pvr_file->pvr_dev = pvr_dev; + INIT_LIST_HEAD(&pvr_file->contexts); + xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1); xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1); xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1); @@ -1482,7 +1485,7 @@ static const struct dev_pm_ops pvr_pm_ops = { static struct platform_driver pvr_driver = { .probe = pvr_probe, - .remove_new = pvr_remove, + .remove = pvr_remove, .driver = { .name = PVR_DRIVER_NAME, .pm = &pvr_pm_ops, @@ -1494,5 +1497,5 @@ module_platform_driver(pvr_driver); MODULE_AUTHOR("Imagination Technologies Ltd."); MODULE_DESCRIPTION(PVR_DRIVER_DESC); MODULE_LICENSE("Dual MIT/GPL"); -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw"); diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 39773991710a..363f885a7098 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -14,6 +14,7 @@ #include <drm/drm_gem.h> #include <drm/drm_gpuvm.h> +#include <linux/bug.h> #include <linux/container_of.h> #include <linux/err.h> #include <linux/errno.h> @@ -597,12 +598,26 @@ err_free: } /** - * pvr_vm_context_release() - Teardown a VM context. - * @ref_count: Pointer to reference counter of the VM context. + * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context. + * @vm_ctx: Target VM context. * * This function ensures that no mappings are left dangling by unmapping them * all in order of ascending device-virtual address. */ +void +pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx) +{ + WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, + vm_ctx->gpuvm_mgr.mm_range)); +} + +/** + * pvr_vm_context_release() - Teardown a VM context. + * @ref_count: Pointer to reference counter of the VM context. + * + * This function also ensures that no mappings are left dangling by calling + * pvr_vm_unmap_all. 
+ */ static void pvr_vm_context_release(struct kref *ref_count) { @@ -612,8 +627,7 @@ pvr_vm_context_release(struct kref *ref_count) if (vm_ctx->fw_mem_ctx_obj) pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj); - WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, - vm_ctx->gpuvm_mgr.mm_range)); + pvr_vm_unmap_all(vm_ctx); pvr_mmu_context_destroy(vm_ctx->mmu_ctx); drm_gem_private_object_fini(&vm_ctx->dummy_gem); diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h index f2a6463f2b05..79406243617c 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.h +++ b/drivers/gpu/drm/imagination/pvr_vm.h @@ -39,6 +39,7 @@ int pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset, u64 device_addr, u64 size); int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size); +void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx); dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx); struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx); diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig index 9d58f41bd00c..e014ed3ae66c 100644 --- a/drivers/gpu/drm/imx/dcss/Kconfig +++ b/drivers/gpu/drm/imx/dcss/Kconfig @@ -7,7 +7,7 @@ config DRM_IMX_DCSS select DRM_BRIDGE_CONNECTOR select DRM_GEM_DMA_HELPER select VIDEOMODE_HELPERS - depends on DRM && ARCH_MXC && ARM64 + depends on DRM && ((ARCH_MXC && ARM64) || COMPILE_TEST) help Choose this if you have a NXP i.MX8MQ based system and want to use the Display Controller Subsystem. This option enables DCSS support. diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c index d881f5a34760..19b027cc1dc4 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-drv.c +++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c @@ -112,7 +112,7 @@ MODULE_DEVICE_TABLE(of, dcss_of_match); static struct platform_driver dcss_platform_driver = { .probe = dcss_drv_platform_probe, - .remove_new = dcss_drv_platform_remove, + .remove = dcss_drv_platform_remove, .shutdown = dcss_drv_platform_shutdown, .driver = { .name = "imx-dcss", diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c index 3ec721afc30c..63a335c62296 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-kms.c +++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c @@ -5,6 +5,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_bridge_connector.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> diff --git a/drivers/gpu/drm/imx/dcss/dcss-scaler.c b/drivers/gpu/drm/imx/dcss/dcss-scaler.c index 825728c356ff..32c3f46b21da 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-scaler.c +++ b/drivers/gpu/drm/imx/dcss/dcss-scaler.c @@ -136,7 +136,7 @@ static int div_q(int A, int B) else temp -= B / 2; - result = (int)(temp / B); + result = div_s64(temp, B); return result; } @@ -239,7 +239,7 @@ static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps, ll_temp = coef[phase][i]; ll_temp <<= PSC_COEFF_PRECISION; ll_temp += sum >> 1; - ll_temp /= sum; + ll_temp = div_s64(ll_temp, sum); coef[phase][i] = (int)ll_temp; } } diff --git a/drivers/gpu/drm/imx/ipuv3/Kconfig b/drivers/gpu/drm/imx/ipuv3/Kconfig index 0ea1feb84672..acaf25089001 100644 --- a/drivers/gpu/drm/imx/ipuv3/Kconfig +++ b/drivers/gpu/drm/imx/ipuv3/Kconfig @@ -15,6 +15,7 @@ config DRM_IMX_PARALLEL_DISPLAY depends on DRM_IMX select DRM_BRIDGE select DRM_BRIDGE_CONNECTOR + select DRM_IMX_LEGACY_BRIDGE select DRM_PANEL_BRIDGE select VIDEOMODE_HELPERS 
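The dcss-scaler hunks above replace plain '/' on s64 values with div_s64() from <linux/math64.h>. This matters once the Kconfig change allows COMPILE_TEST builds beyond ARM64: on a 32-bit target, a raw 64-bit division is emitted as a call to a compiler helper (such as __aeabi_ldivmod on ARM) that the kernel does not provide. A minimal sketch of the same round-to-nearest division pattern, using a hypothetical helper name (div_q_rounded) rather than the driver's exact internals:

#include <linux/math64.h>

/*
 * Round-to-nearest signed division of a 64-bit dividend by a 32-bit
 * divisor (hypothetical example, not the driver's code). div_s64()
 * compiles to a native divide on 64-bit kernels and to the kernel's
 * own 64-bit division routine on 32-bit kernels.
 */
static int div_q_rounded(s64 temp, int b)
{
	if ((temp >= 0) == (b >= 0))
		temp += b / 2;	/* same sign: bias up before truncation */
	else
		temp -= b / 2;	/* opposite sign: bias down */

	return (int)div_s64(temp, b);
}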
diff --git a/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c b/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c index 0006ea52b83c..8333c4bf7369 100644 --- a/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c @@ -265,7 +265,7 @@ static void dw_hdmi_imx_remove(struct platform_device *pdev) static struct platform_driver dw_hdmi_imx_platform_driver = { .probe = dw_hdmi_imx_probe, - .remove_new = dw_hdmi_imx_remove, + .remove = dw_hdmi_imx_remove, .driver = { .name = "dwhdmi-imx", .of_match_table = dw_hdmi_imx_dt_ids, diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c index ced06bd8eae8..5f2c93c3c288 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c @@ -325,7 +325,7 @@ MODULE_DEVICE_TABLE(of, imx_drm_dt_ids); static struct platform_driver imx_drm_pdrv = { .probe = imx_drm_platform_probe, - .remove_new = imx_drm_platform_remove, + .remove = imx_drm_platform_remove, .shutdown = imx_drm_platform_shutdown, .driver = { .name = "imx-drm", diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c index ff74018ac5cd..6be7a57ad03d 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c @@ -632,7 +632,7 @@ static void imx_ldb_remove(struct platform_device *pdev) static struct platform_driver imx_ldb_driver = { .probe = imx_ldb_probe, - .remove_new = imx_ldb_remove, + .remove = imx_ldb_remove, .driver = { .of_match_table = imx_ldb_dt_ids, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c index d46d07d25f51..3a3c8a195119 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c @@ -662,7 +662,7 @@ MODULE_DEVICE_TABLE(of, imx_tve_dt_ids); static struct platform_driver imx_tve_driver = { .probe = imx_tve_probe, - .remove_new = imx_tve_remove, + .remove = imx_tve_remove, .driver = { .of_match_table = imx_tve_dt_ids, .name = "imx-tve", diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c index 99db53e167bd..cf7b02b2d52c 100644 --- a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c @@ -449,5 +449,5 @@ struct platform_driver ipu_drm_driver = { .name = "imx-ipuv3-crtc", }, .probe = ipu_drm_probe, - .remove_new = ipu_drm_remove, + .remove = ipu_drm_remove, }; diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c index 70f62e89622e..9e66eb77b1eb 100644 --- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c +++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c @@ -273,7 +273,7 @@ MODULE_DEVICE_TABLE(of, imx_pd_dt_ids); static struct platform_driver imx_pd_driver = { .probe = imx_pd_probe, - .remove_new = imx_pd_remove, + .remove = imx_pd_remove, .driver = { .of_match_table = imx_pd_dt_ids, .name = "imx-parallel-display", diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c index 3215c4acd675..fa7d44623c52 100644 --- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c +++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c @@ -527,7 +527,7 @@ static struct platform_driver imx_lcdc_driver = { .of_match_table = imx_lcdc_of_dev_id, }, .probe = imx_lcdc_probe, - .remove_new = imx_lcdc_remove, + .remove = imx_lcdc_remove, .shutdown = imx_lcdc_shutdown, }; module_platform_driver(imx_lcdc_driver); diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index 056b70b63554..8469e1e5e582 100644 
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -1632,7 +1632,7 @@ static struct platform_driver ingenic_drm_driver = { .of_match_table = of_match_ptr(ingenic_drm_of_match), }, .probe = ingenic_drm_probe, - .remove_new = ingenic_drm_remove, + .remove = ingenic_drm_remove, .shutdown = ingenic_drm_shutdown, }; diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c index 5bd9072352b5..26ebf424d63e 100644 --- a/drivers/gpu/drm/ingenic/ingenic-ipu.c +++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c @@ -991,7 +991,7 @@ static struct platform_driver ingenic_ipu_driver = { .of_match_table = ingenic_ipu_of_match, }, .probe = ingenic_ipu_probe, - .remove_new = ingenic_ipu_remove, + .remove = ingenic_ipu_remove, }; struct platform_driver *ingenic_ipu_driver_ptr = &ingenic_ipu_driver; diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c index 0274ab9caa85..a3d31de761cb 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.c +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -622,7 +622,7 @@ static SIMPLE_DEV_PM_OPS(kmb_pm_ops, kmb_pm_suspend, kmb_pm_resume); static struct platform_driver kmb_platform_driver = { .probe = kmb_probe, - .remove_new = kmb_remove, + .remove = kmb_remove, .driver = { .name = "kmb-drm", .pm = &kmb_pm_ops, diff --git a/drivers/gpu/drm/lib/drm_random.h b/drivers/gpu/drm/lib/drm_random.h index 5543bf0474bc..9f827260a89d 100644 --- a/drivers/gpu/drm/lib/drm_random.h +++ b/drivers/gpu/drm/lib/drm_random.h @@ -6,7 +6,7 @@ * be transposed to lib/ at the earliest convenience. */ -#include <linux/random.h> +#include <linux/prandom.h> #define DRM_RND_STATE_INITIALIZER(seed__) ({ \ struct rnd_state state__; \ diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index 10bce18b7c31..fb3062c872b3 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -488,7 +488,7 @@ static const struct dev_pm_ops lima_pm_ops = { static struct platform_driver lima_platform_driver = { .probe = lima_pdev_probe, - .remove_new = lima_pdev_remove, + .remove = lima_pdev_remove, .driver = { .name = "lima", .pm = &lima_pm_ops, diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c index e4d90701b29d..fb9de5e0bc0e 100644 --- a/drivers/gpu/drm/logicvc/logicvc_drm.c +++ b/drivers/gpu/drm/logicvc/logicvc_drm.c @@ -491,7 +491,7 @@ MODULE_DEVICE_TABLE(of, logicvc_drm_of_table); static struct platform_driver logicvc_drm_platform_driver = { .probe = logicvc_drm_probe, - .remove_new = logicvc_drm_remove, + .remove = logicvc_drm_remove, .shutdown = logicvc_drm_shutdown, .driver = { .name = "logicvc-drm", diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c index a19dce35c839..b350bdcf1645 100644 --- a/drivers/gpu/drm/loongson/lsdc_drv.c +++ b/drivers/gpu/drm/loongson/lsdc_drv.c @@ -3,10 +3,10 @@ * Copyright (C) 2023 Loongson Technology Corporation Limited */ +#include <linux/aperture.h> #include <linux/pci.h> #include <linux/vgaarb.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> @@ -215,9 +215,9 @@ lsdc_create_device(struct pci_dev *pdev, return ERR_PTR(ret); } - ret = drm_aperture_remove_conflicting_framebuffers(ldev->vram_base, - ldev->vram_size, - driver); + ret = aperture_remove_conflicting_devices(ldev->vram_base, + ldev->vram_size, + driver->name); if (ret) { drm_err(ddev, "Remove firmware framebuffers failed: %d\n", ret); return 
ERR_PTR(ret); diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index f60bdd7b6c13..c4d51f5f038d 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -483,7 +483,7 @@ static struct platform_driver mcde_driver = { .of_match_table = mcde_of_match, }, .probe = mcde_probe, - .remove_new = mcde_remove, + .remove = mcde_remove, .shutdown = mcde_shutdown, }; diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c index e2fad1a048b5..395449a72f0a 100644 --- a/drivers/gpu/drm/mcde/mcde_dsi.c +++ b/drivers/gpu/drm/mcde/mcde_dsi.c @@ -1229,5 +1229,5 @@ struct platform_driver mcde_dsi_driver = { .of_match_table = mcde_dsi_of_match, }, .probe = mcde_dsi_probe, - .remove_new = mcde_dsi_remove, + .remove = mcde_dsi_remove, }; diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index 3d2436272ff2..f496e6cfdfe0 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -2,9 +2,9 @@ config DRM_MEDIATEK tristate "DRM Support for Mediatek SoCs" depends on DRM - depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST) + depends on ARCH_MEDIATEK || COMPILE_TEST depends on COMMON_CLK - depends on HAVE_ARM_SMCCC + depends on HAVE_ARM_SMCCC || COMPILE_TEST depends on OF depends on MTK_MMSYS select DRM_CLIENT_SELECTION diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c index 2de248443147..b42c0d87eba3 100644 --- a/drivers/gpu/drm/mediatek/mtk_cec.c +++ b/drivers/gpu/drm/mediatek/mtk_cec.c @@ -241,7 +241,7 @@ MODULE_DEVICE_TABLE(of, mtk_cec_of_ids); struct platform_driver mtk_cec_driver = { .probe = mtk_cec_probe, - .remove_new = mtk_cec_remove, + .remove = mtk_cec_remove, .driver = { .name = "mediatek-cec", .of_match_table = mtk_cec_of_ids, diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c index 175b00e5a253..eb0e1233ad04 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_crtc.c @@ -127,9 +127,8 @@ static void mtk_crtc_destroy(struct drm_crtc *crtc) mtk_mutex_put(mtk_crtc->mutex); #if IS_REACHABLE(CONFIG_MTK_CMDQ) - cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle); - if (mtk_crtc->cmdq_client.chan) { + cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle); mbox_free_channel(mtk_crtc->cmdq_client.chan); mtk_crtc->cmdq_client.chan = NULL; } @@ -913,6 +912,7 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev, BIT(pipe), mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes), mtk_ddp_comp_supported_rotations(comp), + mtk_ddp_comp_get_blend_modes(comp), mtk_ddp_comp_get_formats(comp), mtk_ddp_comp_get_num_formats(comp), i); if (ret) diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c index be66d94be361..edc6417639e6 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c @@ -363,6 +363,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = { .layer_config = mtk_ovl_layer_config, .bgclr_in_on = mtk_ovl_bgclr_in_on, .bgclr_in_off = mtk_ovl_bgclr_in_off, + .get_blend_modes = mtk_ovl_get_blend_modes, .get_formats = mtk_ovl_get_formats, .get_num_formats = mtk_ovl_get_num_formats, }; @@ -416,6 +417,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = { .disconnect = mtk_ovl_adaptor_disconnect, .add = mtk_ovl_adaptor_add_comp, .remove = mtk_ovl_adaptor_remove_comp, + .get_blend_modes = mtk_ovl_adaptor_get_blend_modes, .get_formats = 
mtk_ovl_adaptor_get_formats, .get_num_formats = mtk_ovl_adaptor_get_num_formats, .mode_valid = mtk_ovl_adaptor_mode_valid, diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h index ecf6dc283cd7..39720b27f4e9 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h @@ -80,6 +80,7 @@ struct mtk_ddp_comp_funcs { void (*ctm_set)(struct device *dev, struct drm_crtc_state *state); struct device * (*dma_dev_get)(struct device *dev); + u32 (*get_blend_modes)(struct device *dev); const u32 *(*get_formats)(struct device *dev); size_t (*get_num_formats)(struct device *dev); void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next); @@ -267,6 +268,15 @@ static inline struct device *mtk_ddp_comp_dma_dev_get(struct mtk_ddp_comp *comp) } static inline +u32 mtk_ddp_comp_get_blend_modes(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->get_blend_modes) + return comp->funcs->get_blend_modes(comp->dev); + + return 0; +} + +static inline const u32 *mtk_ddp_comp_get_formats(struct mtk_ddp_comp *comp) { if (comp->funcs && comp->funcs->get_formats) diff --git a/drivers/gpu/drm/mediatek/mtk_disp_aal.c b/drivers/gpu/drm/mediatek/mtk_disp_aal.c index 59fb9a08d54b..abc9e5525d03 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_aal.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_aal.c @@ -218,7 +218,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_aal_driver_dt_match); struct platform_driver mtk_disp_aal_driver = { .probe = mtk_disp_aal_probe, - .remove_new = mtk_disp_aal_remove, + .remove = mtk_disp_aal_remove, .driver = { .name = "mediatek-disp-aal", .of_match_table = mtk_disp_aal_driver_dt_match, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c index 9b75727e0861..10d60d2c2a56 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c @@ -209,7 +209,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_ccorr_driver_dt_match); struct platform_driver mtk_disp_ccorr_driver = { .probe = mtk_disp_ccorr_probe, - .remove_new = mtk_disp_ccorr_remove, + .remove = mtk_disp_ccorr_remove, .driver = { .name = "mediatek-disp-ccorr", .of_match_table = mtk_disp_ccorr_driver_dt_match, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c index 2fd5e7dc9e24..dd8433a38282 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_color.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c @@ -159,7 +159,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_color_driver_dt_match); struct platform_driver mtk_disp_color_driver = { .probe = mtk_disp_color_probe, - .remove_new = mtk_disp_color_remove, + .remove = mtk_disp_color_remove, .driver = { .name = "mediatek-disp-color", .of_match_table = mtk_disp_color_driver_dt_match, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h index 082ac18fe04a..04217a36939c 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h @@ -103,11 +103,13 @@ void mtk_ovl_register_vblank_cb(struct device *dev, void mtk_ovl_unregister_vblank_cb(struct device *dev); void mtk_ovl_enable_vblank(struct device *dev); void mtk_ovl_disable_vblank(struct device *dev); +u32 mtk_ovl_get_blend_modes(struct device *dev); const u32 *mtk_ovl_get_formats(struct device *dev); size_t mtk_ovl_get_num_formats(struct device *dev); void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex); void mtk_ovl_adaptor_remove_comp(struct device *dev, 
struct mtk_mutex *mutex); +bool mtk_ovl_adaptor_is_comp_present(struct device_node *node); void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev, unsigned int next); void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev, @@ -131,6 +133,7 @@ void mtk_ovl_adaptor_start(struct device *dev); void mtk_ovl_adaptor_stop(struct device *dev); unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev); struct device *mtk_ovl_adaptor_dma_dev_get(struct device *dev); +u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev); const u32 *mtk_ovl_adaptor_get_formats(struct device *dev); size_t mtk_ovl_adaptor_get_num_formats(struct device *dev); enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c index f0b38817ba6c..b17b11d93846 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c @@ -329,7 +329,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_gamma_driver_dt_match); struct platform_driver mtk_disp_gamma_driver = { .probe = mtk_disp_gamma_probe, - .remove_new = mtk_disp_gamma_remove, + .remove = mtk_disp_gamma_remove, .driver = { .name = "mediatek-disp-gamma", .of_match_table = mtk_disp_gamma_driver_dt_match, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c index 435e5d9c8520..563b1b248fbb 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c @@ -370,7 +370,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_merge_driver_dt_match); struct platform_driver mtk_disp_merge_driver = { .probe = mtk_disp_merge_probe, - .remove_new = mtk_disp_merge_remove, + .remove = mtk_disp_merge_remove, .driver = { .name = "mediatek-disp-merge", .of_match_table = mtk_disp_merge_driver_dt_match, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 89b439dcf3a6..f731d4fbe8b6 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -65,8 +65,8 @@ #define OVL_CON_CLRFMT_RGB (1 << 12) #define OVL_CON_CLRFMT_ARGB8888 (2 << 12) #define OVL_CON_CLRFMT_RGBA8888 (3 << 12) -#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP) -#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP) +#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP) +#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP) #define OVL_CON_CLRFMT_UYVY (4 << 12) #define OVL_CON_CLRFMT_YUYV (5 << 12) #define OVL_CON_MTX_YUV_TO_RGB (6 << 16) @@ -146,6 +146,7 @@ struct mtk_disp_ovl_data { bool fmt_rgb565_is_0; bool smi_id_en; bool supports_afbc; + const u32 blend_modes; const u32 *formats; size_t num_formats; bool supports_clrfmt_ext; @@ -214,6 +215,13 @@ void mtk_ovl_disable_vblank(struct device *dev) writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN); } +u32 mtk_ovl_get_blend_modes(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + return ovl->data->blend_modes; +} + const u32 *mtk_ovl_get_formats(struct device *dev) { struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); @@ -386,14 +394,27 @@ void mtk_ovl_layer_off(struct device *dev, unsigned int idx, DISP_REG_OVL_RDMA_CTRL(idx)); } -static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt, - unsigned int blend_mode) +static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl, + struct mtk_plane_state *state) { 
- /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" - * is defined in mediatek HW data sheet. - * The alphabet order in XXX is no relation to data - * arrangement in memory. + unsigned int fmt = state->pending.format; + unsigned int blend_mode = DRM_MODE_BLEND_COVERAGE; + + /* + * For the platforms where OVL_CON_CLRFMT_MAN is defined in the hardware data sheet + * and supports premultiplied color formats, such as OVL_CON_CLRFMT_PARGB8888. + * + * Check blend_modes in the driver data to see if premultiplied mode is supported. + * If not, use coverage mode instead to set it to the supported color formats. + * + * Current DRM assumption is that alpha is default premultiplied, so the bitmask of + * blend_modes must include BIT(DRM_MODE_BLEND_PREMULTI). Otherwise, mtk_plane_init() + * will get an error return from drm_plane_create_blend_mode_property() and + * state->base.pixel_blend_mode should not be used. */ + if (ovl->data->blend_modes & BIT(DRM_MODE_BLEND_PREMULTI)) + blend_mode = state->base.pixel_blend_mode; + switch (fmt) { default: case DRM_FORMAT_RGB565: @@ -471,20 +492,26 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx, return; } - con = ovl_fmt_convert(ovl, fmt, blend_mode); + con = mtk_ovl_fmt_convert(ovl, state); if (state->base.fb) { - con |= OVL_CON_AEN; con |= state->base.alpha & OVL_CON_ALPHA; - } - /* CONST_BLD must be enabled for XRGB formats although the alpha channel - * can be ignored, or OVL will still read the value from memory. - * For RGB888 related formats, whether CONST_BLD is enabled or not won't - * affect the result. Therefore we use !has_alpha as the condition. - */ - if ((state->base.fb && !state->base.fb->format->has_alpha) || - blend_mode == DRM_MODE_BLEND_PIXEL_NONE) - ignore_pixel_alpha = OVL_CONST_BLEND; + /* + * For blend_modes supported SoCs, always enable alpha blending. + * For blend_modes unsupported SoCs, enable alpha blending when has_alpha is set. + */ + if (blend_mode || state->base.fb->format->has_alpha) + con |= OVL_CON_AEN; + + /* + * Although the alpha channel can be ignored, CONST_BLD must be enabled + * for XRGB format, otherwise OVL will still read the value from memory. + * For RGB888 related formats, whether CONST_BLD is enabled or not won't + * affect the result. Therefore we use !has_alpha as the condition. 
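+ * (Editorial addition, not part of the patch: spelled out with the DRM
+ * blend-mode values DRM_MODE_BLEND_PREMULTI == 0, COVERAGE == 1 and
+ * PIXEL_NONE == 2, the two flags computed in this block reduce to
+ *
+ *	OVL_CON_AEN     set iff blend_mode != PREMULTI || has_alpha
+ *	OVL_CONST_BLEND set iff blend_mode == PIXEL_NONE || !has_alpha
+ *
+ * so planes that keep the default PREMULTI mode only blend when the
+ * format actually carries an alpha channel.)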
+ */ + if (blend_mode == DRM_MODE_BLEND_PIXEL_NONE || !state->base.fb->format->has_alpha) + ignore_pixel_alpha = OVL_CONST_BLEND; + } if (pending->rotation & DRM_MODE_REFLECT_Y) { con |= OVL_CON_VIRT_FLIP; @@ -663,6 +690,9 @@ static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = { .layer_nr = 4, .fmt_rgb565_is_0 = true, .smi_id_en = true, + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE), .formats = mt8173_formats, .num_formats = ARRAY_SIZE(mt8173_formats), }; @@ -673,6 +703,9 @@ static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = { .layer_nr = 2, .fmt_rgb565_is_0 = true, .smi_id_en = true, + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE), .formats = mt8173_formats, .num_formats = ARRAY_SIZE(mt8173_formats), }; @@ -684,6 +717,9 @@ static const struct mtk_disp_ovl_data mt8195_ovl_driver_data = { .fmt_rgb565_is_0 = true, .smi_id_en = true, .supports_afbc = true, + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE), .formats = mt8195_formats, .num_formats = ARRAY_SIZE(mt8195_formats), .supports_clrfmt_ext = true, @@ -710,7 +746,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match); struct platform_driver mtk_disp_ovl_driver = { .probe = mtk_disp_ovl_probe, - .remove_new = mtk_disp_ovl_remove, + .remove = mtk_disp_ovl_remove, .driver = { .name = "mediatek-disp-ovl", .of_match_table = mtk_disp_ovl_driver_dt_match, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c index c6768210b08b..fa0e95dd29a0 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c @@ -400,6 +400,13 @@ void mtk_ovl_adaptor_disable_vblank(struct device *dev) mtk_ethdr_disable_vblank(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]); } +u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + return mtk_ethdr_get_blend_modes(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]); +} + const u32 *mtk_ovl_adaptor_get_formats(struct device *dev) { struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); @@ -490,6 +497,41 @@ static int compare_of(struct device *dev, void *data) return dev->of_node == data; } +static int ovl_adaptor_of_get_ddp_comp_type(struct device_node *node, + enum mtk_ovl_adaptor_comp_type *ctype) +{ + const struct of_device_id *of_id = of_match_node(mtk_ovl_adaptor_comp_dt_ids, node); + + if (!of_id) + return -EINVAL; + + *ctype = (enum mtk_ovl_adaptor_comp_type)((uintptr_t)of_id->data); + + return 0; +} + +bool mtk_ovl_adaptor_is_comp_present(struct device_node *node) +{ + enum mtk_ovl_adaptor_comp_type type; + int ret; + + ret = ovl_adaptor_of_get_ddp_comp_type(node, &type); + if (ret) + return false; + + if (type >= OVL_ADAPTOR_TYPE_NUM) + return false; + + /* + * In the context of mediatek-drm, ETHDR, MDP_RDMA and Padding are + * used exclusively by OVL Adaptor: if this component is not one of + * those, it's likely not an OVL Adaptor path. 
+ */ + return type == OVL_ADAPTOR_TYPE_ETHDR || + type == OVL_ADAPTOR_TYPE_MDP_RDMA || + type == OVL_ADAPTOR_TYPE_PADDING; +} + static int ovl_adaptor_comp_init(struct device *dev, struct component_match **match) { struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev); @@ -499,12 +541,11 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma parent = dev->parent->parent->of_node->parent; for_each_child_of_node_scoped(parent, node) { - const struct of_device_id *of_id; enum mtk_ovl_adaptor_comp_type type; - int id; + int id, ret; - of_id = of_match_node(mtk_ovl_adaptor_comp_dt_ids, node); - if (!of_id) + ret = ovl_adaptor_of_get_ddp_comp_type(node, &type); + if (ret) continue; if (!of_device_is_available(node)) { @@ -513,7 +554,6 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma continue; } - type = (enum mtk_ovl_adaptor_comp_type)(uintptr_t)of_id->data; id = ovl_adaptor_comp_get_id(dev, node, type); if (id < 0) { dev_warn(dev, "Skipping unknown component %pOF\n", @@ -625,7 +665,7 @@ static void mtk_disp_ovl_adaptor_remove(struct platform_device *pdev) struct platform_driver mtk_disp_ovl_adaptor_driver = { .probe = mtk_disp_ovl_adaptor_probe, - .remove_new = mtk_disp_ovl_adaptor_remove, + .remove = mtk_disp_ovl_adaptor_remove, .driver = { .name = "mediatek-disp-ovl-adaptor", }, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 07243f372260..bf47790e4d6b 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -417,7 +417,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match); struct platform_driver mtk_disp_rdma_driver = { .probe = mtk_disp_rdma_probe, - .remove_new = mtk_disp_rdma_remove, + .remove = mtk_disp_rdma_remove, .driver = { .name = "mediatek-disp-rdma", .of_match_table = mtk_disp_rdma_driver_dt_match, diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index d8796a904eca..36713c176cfc 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -145,6 +145,89 @@ struct mtk_dp_data { u16 audio_m_div2_bit; }; +static const struct mtk_dp_efuse_fmt mt8188_dp_efuse_fmt[MTK_DP_CAL_MAX] = { + [MTK_DP_CAL_GLB_BIAS_TRIM] = { + .idx = 0, + .shift = 10, + .mask = 0x1f, + .min_val = 1, + .max_val = 0x1e, + .default_val = 0xf, + }, + [MTK_DP_CAL_CLKTX_IMPSE] = { + .idx = 0, + .shift = 15, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = { + .idx = 1, + .shift = 0, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = { + .idx = 1, + .shift = 8, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = { + .idx = 1, + .shift = 16, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = { + .idx = 1, + .shift = 24, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = { + .idx = 1, + .shift = 4, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = { + .idx = 1, + .shift = 12, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = { + .idx = 1, + .shift = 20, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + 
[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = { + .idx = 1, + .shift = 28, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, +}; + static const struct mtk_dp_efuse_fmt mt8195_edp_efuse_fmt[MTK_DP_CAL_MAX] = { [MTK_DP_CAL_GLB_BIAS_TRIM] = { .idx = 3, @@ -311,7 +394,7 @@ static const struct mtk_dp_efuse_fmt mt8195_dp_efuse_fmt[MTK_DP_CAL_MAX] = { }, }; -static struct regmap_config mtk_dp_regmap_config = { +static const struct regmap_config mtk_dp_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, @@ -2771,7 +2854,7 @@ static SIMPLE_DEV_PM_OPS(mtk_dp_pm_ops, mtk_dp_suspend, mtk_dp_resume); static const struct mtk_dp_data mt8188_dp_data = { .bridge_type = DRM_MODE_CONNECTOR_DisplayPort, .smc_cmd = MTK_DP_SIP_ATF_VIDEO_UNMUTE, - .efuse_fmt = mt8195_dp_efuse_fmt, + .efuse_fmt = mt8188_dp_efuse_fmt, .audio_supported = true, .audio_pkt_in_hblank_area = true, .audio_m_div2_bit = MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2, @@ -2816,7 +2899,7 @@ MODULE_DEVICE_TABLE(of, mtk_dp_of_match); static struct platform_driver mtk_dp_driver = { .probe = mtk_dp_probe, - .remove_new = mtk_dp_remove, + .remove = mtk_dp_remove, .driver = { .name = "mediatek-drm-dp", .of_match_table = mtk_dp_of_match, diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index a08d20654954..1864eb02dbf5 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -704,6 +704,20 @@ static int mtk_dpi_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct mtk_dpi *dpi = bridge_to_dpi(bridge); + int ret; + + dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 1, -1); + if (IS_ERR(dpi->next_bridge)) { + ret = PTR_ERR(dpi->next_bridge); + if (ret == -EPROBE_DEFER) + return ret; + + /* Old devicetree has only one endpoint */ + dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 0, 0); + if (IS_ERR(dpi->next_bridge)) + return dev_err_probe(dpi->dev, PTR_ERR(dpi->next_bridge), + "Failed to get bridge\n"); + } return drm_bridge_attach(bridge->encoder, dpi->next_bridge, &dpi->bridge, flags); @@ -1058,13 +1072,6 @@ static int mtk_dpi_probe(struct platform_device *pdev) if (dpi->irq < 0) return dpi->irq; - dpi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0); - if (IS_ERR(dpi->next_bridge)) - return dev_err_probe(dev, PTR_ERR(dpi->next_bridge), - "Failed to get bridge\n"); - - dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node); - platform_set_drvdata(pdev, dpi); dpi->bridge.funcs = &mtk_dpi_bridge_funcs; @@ -1101,7 +1108,7 @@ MODULE_DEVICE_TABLE(of, mtk_dpi_of_ids); struct platform_driver mtk_dpi_driver = { .probe = mtk_dpi_probe, - .remove_new = mtk_dpi_remove, + .remove = mtk_dpi_remove, .driver = { .name = "mediatek-dpi", .of_match_table = mtk_dpi_of_ids, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index a4594f8873d5..0829ceb9967c 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -27,6 +27,7 @@ #include "mtk_crtc.h" #include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" #include "mtk_drm_drv.h" #include "mtk_gem.h" @@ -372,12 +373,11 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev) struct mtk_drm_private *temp_drm_priv; struct device_node *phandle = dev->parent->of_node; const struct of_device_id *of_id; - struct device_node *node; struct device *drm_dev; unsigned int cnt = 0; int i, j; - for_each_child_of_node(phandle->parent, node) { + 
for_each_child_of_node_scoped(phandle->parent, node) { struct platform_device *pdev; of_id = of_match_node(mtk_drm_of_ids, node); @@ -820,12 +820,235 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = { { } }; +static int mtk_drm_of_get_ddp_comp_type(struct device_node *node, enum mtk_ddp_comp_type *ctype) +{ + const struct of_device_id *of_id = of_match_node(mtk_ddp_comp_dt_ids, node); + + if (!of_id) + return -EINVAL; + + *ctype = (enum mtk_ddp_comp_type)((uintptr_t)of_id->data); + + return 0; +} + +static int mtk_drm_of_get_ddp_ep_cid(struct device_node *node, + int output_port, enum mtk_crtc_path crtc_path, + struct device_node **next, unsigned int *cid) +{ + struct device_node *ep_dev_node, *ep_out; + enum mtk_ddp_comp_type comp_type; + int ret; + + ep_out = of_graph_get_endpoint_by_regs(node, output_port, crtc_path); + if (!ep_out) + return -ENOENT; + + ep_dev_node = of_graph_get_remote_port_parent(ep_out); + of_node_put(ep_out); + if (!ep_dev_node) + return -EINVAL; + + /* + * Pass the next node pointer regardless of failures in the later code + * so that if this function is called in a loop it will walk through all + * of the subsequent endpoints anyway. + */ + *next = ep_dev_node; + + if (!of_device_is_available(ep_dev_node)) + return -ENODEV; + + ret = mtk_drm_of_get_ddp_comp_type(ep_dev_node, &comp_type); + if (ret) { + if (mtk_ovl_adaptor_is_comp_present(ep_dev_node)) { + *cid = (unsigned int)DDP_COMPONENT_DRM_OVL_ADAPTOR; + return 0; + } + return ret; + } + + ret = mtk_ddp_comp_get_id(ep_dev_node, comp_type); + if (ret < 0) + return ret; + + /* All ok! Pass the Component ID to the caller. */ + *cid = (unsigned int)ret; + + return 0; +} + +/** + * mtk_drm_of_ddp_path_build_one - Build a Display HW Pipeline for a CRTC Path + * @dev: The mediatek-drm device + * @cpath: CRTC Path relative to a VDO or MMSYS + * @out_path: Pointer to an array that will contain the new pipeline + * @out_path_len: Number of entries in the pipeline array + * + * MediaTek SoCs can use different DDP hardware pipelines (or paths) depending + * on the board-specific desired display configuration; this function walks + * through all of the output endpoints starting from a VDO or MMSYS hardware + * instance and builds the right pipeline as specified in device trees. + * + * Return: + * * %0 - Display HW Pipeline successfully built and validated + * * %-ENOENT - Display pipeline was not specified in device tree + * * %-EINVAL - Display pipeline built but validation failed + * * %-ENOMEM - Failure to allocate pipeline array to pass to the caller + */ +static int mtk_drm_of_ddp_path_build_one(struct device *dev, enum mtk_crtc_path cpath, + const unsigned int **out_path, + unsigned int *out_path_len) +{ + struct device_node *next, *prev, *vdo = dev->parent->of_node; + unsigned int temp_path[DDP_COMPONENT_DRM_ID_MAX] = { 0 }; + unsigned int *final_ddp_path; + unsigned short int idx = 0; + bool ovl_adaptor_comp_added = false; + int ret; + + /* Get the first entry for the temp_path array */ + ret = mtk_drm_of_get_ddp_ep_cid(vdo, 0, cpath, &next, &temp_path[idx]); + if (ret) { + if (next && temp_path[idx] == DDP_COMPONENT_DRM_OVL_ADAPTOR) { + dev_dbg(dev, "Adding OVL Adaptor for %pOF\n", next); + ovl_adaptor_comp_added = true; + } else { + if (next) + dev_err(dev, "Invalid component %pOF\n", next); + else + dev_err(dev, "Cannot find first endpoint for path %d\n", cpath); + + return ret; + } + } + idx++; + + /* + * Walk through port outputs until we reach the last valid mediatek-drm component. 
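+ * (Editorial sketch, not from the patch: each hop of the walk resolves
+ * the next component roughly as
+ *
+ *	ep = of_graph_get_endpoint_by_regs(node, 1, cpath);
+ *	next = of_graph_get_remote_port_parent(ep);
+ *
+ * i.e. take output port@1, pick the endpoint whose reg equals the CRTC
+ * path index, and follow its remote endpoint to the parent device node.)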
+ * To be valid, this must end with an "invalid" component that is a display node. + */ + do { + prev = next; + ret = mtk_drm_of_get_ddp_ep_cid(next, 1, cpath, &next, &temp_path[idx]); + of_node_put(prev); + if (ret) { + of_node_put(next); + break; + } + + /* + * If this is an OVL adaptor exclusive component and one of those + * was already added, don't add another instance of the generic + * DDP_COMPONENT_OVL_ADAPTOR, as this is used only to decide whether + * to probe that component master driver of which only one instance + * is needed and possible. + */ + if (temp_path[idx] == DDP_COMPONENT_DRM_OVL_ADAPTOR) { + if (!ovl_adaptor_comp_added) + ovl_adaptor_comp_added = true; + else + idx--; + } + } while (++idx < DDP_COMPONENT_DRM_ID_MAX); + + /* + * The device component might not be enabled: in that case, don't + * check the last entry and just report that the device is missing. + */ + if (ret == -ENODEV) + return ret; + + /* If the last entry is not a final display output, the configuration is wrong */ + switch (temp_path[idx - 1]) { + case DDP_COMPONENT_DP_INTF0: + case DDP_COMPONENT_DP_INTF1: + case DDP_COMPONENT_DPI0: + case DDP_COMPONENT_DPI1: + case DDP_COMPONENT_DSI0: + case DDP_COMPONENT_DSI1: + case DDP_COMPONENT_DSI2: + case DDP_COMPONENT_DSI3: + break; + default: + dev_err(dev, "Invalid display hw pipeline. Last component: %d (ret=%d)\n", + temp_path[idx - 1], ret); + return -EINVAL; + } + + final_ddp_path = devm_kmemdup(dev, temp_path, idx * sizeof(temp_path[0]), GFP_KERNEL); + if (!final_ddp_path) + return -ENOMEM; + + dev_dbg(dev, "Display HW Pipeline built with %d components.\n", idx); + + /* Pipeline built! */ + *out_path = final_ddp_path; + *out_path_len = idx; + + return 0; +} + +static int mtk_drm_of_ddp_path_build(struct device *dev, struct device_node *node, + struct mtk_mmsys_driver_data *data) +{ + struct device_node *ep_node; + struct of_endpoint of_ep; + bool output_present[MAX_CRTC] = { false }; + int ret; + + for_each_endpoint_of_node(node, ep_node) { + ret = of_graph_parse_endpoint(ep_node, &of_ep); + if (ret) { + dev_err_probe(dev, ret, "Cannot parse endpoint\n"); + break; + } + + if (of_ep.id >= MAX_CRTC) { + ret = dev_err_probe(dev, -EINVAL, + "Invalid endpoint%u number\n", of_ep.port); + break; + } + + output_present[of_ep.id] = true; + } + + if (ret) { + of_node_put(ep_node); + return ret; + } + + if (output_present[CRTC_MAIN]) { + ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_MAIN, + &data->main_path, &data->main_len); + if (ret && ret != -ENODEV) + return ret; + } + + if (output_present[CRTC_EXT]) { + ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_EXT, + &data->ext_path, &data->ext_len); + if (ret && ret != -ENODEV) + return ret; + } + + if (output_present[CRTC_THIRD]) { + ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_THIRD, + &data->third_path, &data->third_len); + if (ret && ret != -ENODEV) + return ret; + } + + return 0; +} + static int mtk_drm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *phandle = dev->parent->of_node; const struct of_device_id *of_id; struct mtk_drm_private *private; + struct mtk_mmsys_driver_data *mtk_drm_data; struct device_node *node; struct component_match *match = NULL; struct platform_device *ovl_adaptor; @@ -846,7 +1069,27 @@ static int mtk_drm_probe(struct platform_device *pdev) if (!of_id) return -ENODEV; - private->data = of_id->data; + mtk_drm_data = (struct mtk_mmsys_driver_data *)of_id->data; + if (!mtk_drm_data) + return -EINVAL; + + /* Try to build the display pipeline 
from devicetree graphs */ + if (of_graph_is_present(phandle)) { + dev_dbg(dev, "Building display pipeline for MMSYS %u\n", + mtk_drm_data->mmsys_id); + private->data = devm_kmemdup(dev, mtk_drm_data, + sizeof(*mtk_drm_data), GFP_KERNEL); + if (!private->data) + return -ENOMEM; + + ret = mtk_drm_of_ddp_path_build(dev, phandle, private->data); + if (ret) + return ret; + } else { + /* No devicetree graphs support: go with hardcoded paths if present */ + dev_dbg(dev, "Using hardcoded paths for MMSYS %u\n", mtk_drm_data->mmsys_id); + private->data = mtk_drm_data; + }; private->all_drm_private = devm_kmalloc_array(dev, private->data->mmsys_dev_num, sizeof(*private->all_drm_private), @@ -868,12 +1111,11 @@ static int mtk_drm_probe(struct platform_device *pdev) /* Iterate over sibling DISP function blocks */ for_each_child_of_node(phandle->parent, node) { - const struct of_device_id *of_id; enum mtk_ddp_comp_type comp_type; int comp_id; - of_id = of_match_node(mtk_ddp_comp_dt_ids, node); - if (!of_id) + ret = mtk_drm_of_get_ddp_comp_type(node, &comp_type); + if (ret) continue; if (!of_device_is_available(node)) { @@ -882,8 +1124,6 @@ static int mtk_drm_probe(struct platform_device *pdev) continue; } - comp_type = (enum mtk_ddp_comp_type)(uintptr_t)of_id->data; - if (comp_type == MTK_DISP_MUTEX) { int id; @@ -1011,7 +1251,7 @@ static const struct dev_pm_ops mtk_drm_pm_ops = { static struct platform_driver mtk_drm_platform_driver = { .probe = mtk_drm_probe, - .remove_new = mtk_drm_remove, + .remove = mtk_drm_remove, .shutdown = mtk_drm_shutdown, .driver = { .name = "mediatek-drm", diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index ce897984de51..675cdc90a440 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -63,7 +63,7 @@ struct mtk_drm_private { struct device *mmsys_dev; struct device_node *comp_node[DDP_COMPONENT_DRM_ID_MAX]; struct mtk_ddp_comp ddp_comp[DDP_COMPONENT_DRM_ID_MAX]; - const struct mtk_mmsys_driver_data *data; + struct mtk_mmsys_driver_data *data; struct drm_atomic_state *suspend_state; unsigned int mbox_index; struct mtk_drm_private **all_drm_private; diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index eeec641cab60..e61b9bc68e9a 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -988,9 +988,17 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host, dsi->lanes = device->lanes; dsi->format = device->format; dsi->mode_flags = device->mode_flags; - dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0); - if (IS_ERR(dsi->next_bridge)) - return PTR_ERR(dsi->next_bridge); + dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); + if (IS_ERR(dsi->next_bridge)) { + ret = PTR_ERR(dsi->next_bridge); + if (ret == -EPROBE_DEFER) + return ret; + + /* Old devicetree has only one endpoint */ + dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0); + if (IS_ERR(dsi->next_bridge)) + return PTR_ERR(dsi->next_bridge); + } drm_bridge_add(&dsi->bridge); @@ -1293,7 +1301,7 @@ MODULE_DEVICE_TABLE(of, mtk_dsi_of_match); struct platform_driver mtk_dsi_driver = { .probe = mtk_dsi_probe, - .remove_new = mtk_dsi_remove, + .remove = mtk_dsi_remove, .driver = { .name = "mtk-dsi", .of_match_table = mtk_dsi_of_match, diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c index d1d9cf8b10e1..96832d0cca37 100644 --- a/drivers/gpu/drm/mediatek/mtk_ethdr.c +++ 
b/drivers/gpu/drm/mediatek/mtk_ethdr.c @@ -145,6 +145,13 @@ static irqreturn_t mtk_ethdr_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } +u32 mtk_ethdr_get_blend_modes(struct device *dev) +{ + return BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE); +} + void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, struct mtk_plane_state *state, struct cmdq_pkt *cmdq_pkt) @@ -381,7 +388,7 @@ MODULE_DEVICE_TABLE(of, mtk_ethdr_driver_dt_match); struct platform_driver mtk_ethdr_driver = { .probe = mtk_ethdr_probe, - .remove_new = mtk_ethdr_remove, + .remove = mtk_ethdr_remove, .driver = { .name = "mediatek-disp-ethdr", .of_match_table = mtk_ethdr_driver_dt_match, diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.h b/drivers/gpu/drm/mediatek/mtk_ethdr.h index 81af9edea3f7..a72aeee46829 100644 --- a/drivers/gpu/drm/mediatek/mtk_ethdr.h +++ b/drivers/gpu/drm/mediatek/mtk_ethdr.h @@ -13,6 +13,7 @@ void mtk_ethdr_clk_disable(struct device *dev); void mtk_ethdr_config(struct device *dev, unsigned int w, unsigned int h, unsigned int vrefresh, unsigned int bpc, struct cmdq_pkt *cmdq_pkt); +u32 mtk_ethdr_get_blend_modes(struct device *dev); void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, struct mtk_plane_state *state, struct cmdq_pkt *cmdq_pkt); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 7687f673964e..70dc1d4460ad 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1795,7 +1795,7 @@ MODULE_DEVICE_TABLE(of, mtk_hdmi_of_ids); static struct platform_driver mtk_hdmi_driver = { .probe = mtk_hdmi_probe, - .remove_new = mtk_hdmi_remove, + .remove = mtk_hdmi_remove, .driver = { .name = "mediatek-drm-hdmi", .of_match_table = mtk_hdmi_of_ids, diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c index 8e60631d4cd2..07db68067844 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c @@ -331,7 +331,7 @@ MODULE_DEVICE_TABLE(of, mtk_hdmi_ddc_match); struct platform_driver mtk_hdmi_ddc_driver = { .probe = mtk_hdmi_ddc_probe, - .remove_new = mtk_hdmi_ddc_remove, + .remove = mtk_hdmi_ddc_remove, .driver = { .name = "mediatek-hdmi-ddc", .of_match_table = mtk_hdmi_ddc_match, diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c index 7c1a8c796833..fc69ee38ce7d 100644 --- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c @@ -341,7 +341,7 @@ MODULE_DEVICE_TABLE(of, mtk_mdp_rdma_driver_dt_match); struct platform_driver mtk_mdp_rdma_driver = { .probe = mtk_mdp_rdma_probe, - .remove_new = mtk_mdp_rdma_remove, + .remove = mtk_mdp_rdma_remove, .driver = { .name = "mediatek-mdp-rdma", .of_match_table = mtk_mdp_rdma_driver_dt_match, diff --git a/drivers/gpu/drm/mediatek/mtk_padding.c b/drivers/gpu/drm/mediatek/mtk_padding.c index 4bebd13a07bd..b4e3e5a3428b 100644 --- a/drivers/gpu/drm/mediatek/mtk_padding.c +++ b/drivers/gpu/drm/mediatek/mtk_padding.c @@ -146,7 +146,7 @@ MODULE_DEVICE_TABLE(of, mtk_padding_driver_dt_match); struct platform_driver mtk_padding_driver = { .probe = mtk_padding_probe, - .remove_new = mtk_padding_remove, + .remove = mtk_padding_remove, .driver = { .name = "mediatek-disp-padding", .of_match_table = mtk_padding_driver_dt_match, diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c index 7d2cb4e0fafa..8a48b3b0a956 100644 --- 
a/drivers/gpu/drm/mediatek/mtk_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_plane.c @@ -320,8 +320,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, - unsigned int supported_rotations, const u32 *formats, - size_t num_formats, unsigned int plane_idx) + unsigned int supported_rotations, const u32 blend_modes, + const u32 *formats, size_t num_formats, unsigned int plane_idx) { int err; @@ -366,12 +366,11 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, if (err) DRM_ERROR("failed to create property: alpha\n"); - err = drm_plane_create_blend_mode_property(plane, - BIT(DRM_MODE_BLEND_PREMULTI) | - BIT(DRM_MODE_BLEND_COVERAGE) | - BIT(DRM_MODE_BLEND_PIXEL_NONE)); - if (err) - DRM_ERROR("failed to create property: blend_mode\n"); + if (blend_modes) { + err = drm_plane_create_blend_mode_property(plane, blend_modes); + if (err) + DRM_ERROR("failed to create property: blend_mode\n"); + } drm_plane_helper_add(plane, &mtk_plane_helper_funcs); diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h index 5b177eac67b7..3b13b89989c7 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_plane.h @@ -48,6 +48,6 @@ to_mtk_plane_state(struct drm_plane_state *state) int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, - unsigned int supported_rotations, const u32 *formats, - size_t num_formats, unsigned int plane_idx); + unsigned int supported_rotations, const u32 blend_modes, + const u32 *formats, size_t num_formats, unsigned int plane_idx); #endif diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig index b410e0d8015a..417f79829cf8 100644 --- a/drivers/gpu/drm/meson/Kconfig +++ b/drivers/gpu/drm/meson/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_MESON tristate "DRM Support for Amlogic Meson Display Controller" - depends on DRM && OF && (ARM || ARM64) + depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) depends on ARCH_MESON || COMPILE_TEST select DRM_CLIENT_SELECTION select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 2f76f48da38d..0f5a1a54544e 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -8,6 +8,7 @@ * Jasper St. Pierre <jstpierre@mecheye.net> */ +#include <linux/aperture.h> #include <linux/component.h> #include <linux/module.h> #include <linux/of_graph.h> @@ -15,7 +16,6 @@ #include <linux/platform_device.h> #include <linux/soc/amlogic/meson-canvas.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> @@ -128,7 +128,7 @@ static bool meson_vpu_has_available_connectors(struct device *dev) return false; } -static struct regmap_config meson_regmap_config = { +static const struct regmap_config meson_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, @@ -279,7 +279,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) * Remove early framebuffers (ie. simplefb). 
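* (Editor's note, not part of the patch: the drm_aperture_* wrappers are
* being replaced throughout this series by the generic helpers from
* <linux/aperture.h>; the mapping used here is
*
*	drm_aperture_remove_framebuffers(drv)
*		-> aperture_remove_all_conflicting_devices(drv->name)
*	drm_aperture_remove_conflicting_framebuffers(base, size, drv)
*		-> aperture_remove_conflicting_devices(base, size, drv->name)
*	drm_aperture_remove_conflicting_pci_framebuffers(pdev, drv)
*		-> aperture_remove_conflicting_pci_devices(pdev, drv->name)
*
* as the lsdc, meson and mgag200 hunks in this section show.)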
The framebuffer can be * located anywhere in RAM */ - ret = drm_aperture_remove_framebuffers(&meson_driver); + ret = aperture_remove_all_conflicting_devices(meson_driver.name); if (ret) goto free_canvas_vd1_2; @@ -559,7 +559,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = { static struct platform_driver meson_drm_platform_driver = { .probe = meson_drv_probe, - .remove_new = meson_drv_remove, + .remove = meson_drv_remove, .shutdown = meson_drv_shutdown, .driver = { .name = "meson-drm", diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 5565f7777529..0d7c68b29dff 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -272,20 +272,6 @@ static inline void dw_hdmi_g12a_dwc_write(struct meson_dw_hdmi *dw_hdmi, writeb(data, dw_hdmi->hdmitx + addr); } -/* Helper to change specific bits in controller registers */ -static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi, - unsigned int addr, - unsigned int mask, - unsigned int val) -{ - unsigned int data = dw_hdmi->data->dwc_read(dw_hdmi, addr); - - data &= ~mask; - data |= val; - - dw_hdmi->data->dwc_write(dw_hdmi, addr, data); -} - /* Bridge */ /* Setup PHY bandwidth modes */ @@ -870,7 +856,7 @@ MODULE_DEVICE_TABLE(of, meson_dw_hdmi_of_table); static struct platform_driver meson_dw_hdmi_platform_driver = { .probe = meson_dw_hdmi_probe, - .remove_new = meson_dw_hdmi_remove, + .remove = meson_dw_hdmi_remove, .driver = { .name = DRIVER_NAME, .of_match_table = meson_dw_hdmi_of_table, diff --git a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c index a10cff3ca1fe..66c73c512b0e 100644 --- a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c +++ b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c @@ -345,7 +345,7 @@ MODULE_DEVICE_TABLE(of, meson_dw_mipi_dsi_of_table); static struct platform_driver meson_dw_mipi_dsi_platform_driver = { .probe = meson_dw_mipi_dsi_probe, - .remove_new = meson_dw_mipi_dsi_remove, + .remove = meson_dw_mipi_dsi_remove, .driver = { .name = DRIVER_NAME, .of_match_table = meson_dw_mipi_dsi_of_table, diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 29087e2398df..97fd7eb765b4 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -6,10 +6,10 @@ * Dave Airlie */ +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> @@ -20,7 +20,6 @@ #include <drm/drm_managed.h> #include <drm/drm_module.h> #include <drm/drm_pciids.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -87,34 +86,6 @@ resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size) return offset - 65536; } -static irqreturn_t mgag200_irq_handler(int irq, void *arg) -{ - struct drm_device *dev = arg; - struct mga_device *mdev = to_mga_device(dev); - struct drm_crtc *crtc; - u32 status, ien; - - status = RREG32(MGAREG_STATUS); - - if (status & MGAREG_STATUS_VLINEPEN) { - ien = RREG32(MGAREG_IEN); - if (!(ien & MGAREG_IEN_VLINEIEN)) - goto out; - - crtc = drm_crtc_from_index(dev, 0); - if (WARN_ON_ONCE(!crtc)) - goto out; - drm_crtc_handle_vblank(crtc); - - WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR); - - return IRQ_HANDLED; - } - -out: - return IRQ_NONE; -} - /* * DRM driver */ @@ -199,7 +170,6 @@ int mgag200_device_init(struct mga_device *mdev, const struct mgag200_device_funcs 
*funcs) { struct drm_device *dev = &mdev->base; - struct pci_dev *pdev = to_pci_dev(dev->dev); u8 crtcext3, misc; int ret; @@ -226,14 +196,6 @@ int mgag200_device_init(struct mga_device *mdev, mutex_unlock(&mdev->rmmio_lock); WREG32(MGAREG_IEN, 0); - WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR); - - ret = devm_request_irq(&pdev->dev, pdev->irq, mgag200_irq_handler, IRQF_SHARED, - dev->driver->name, dev); - if (ret) { - drm_err(dev, "Failed to acquire interrupt, error %d\n", ret); - return ret; - } return 0; } @@ -266,7 +228,7 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct drm_device *dev; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &mgag200_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, mgag200_driver.name); if (ret) return ret; diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 4760ba92871b..988967eafbf2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -391,24 +391,17 @@ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_st void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state); void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state); void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state); -bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq, - int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode); #define MGAG200_CRTC_HELPER_FUNCS \ .mode_valid = mgag200_crtc_helper_mode_valid, \ .atomic_check = mgag200_crtc_helper_atomic_check, \ .atomic_flush = mgag200_crtc_helper_atomic_flush, \ .atomic_enable = mgag200_crtc_helper_atomic_enable, \ - .atomic_disable = mgag200_crtc_helper_atomic_disable, \ - .get_scanout_position = mgag200_crtc_helper_get_scanout_position + .atomic_disable = mgag200_crtc_helper_atomic_disable void mgag200_crtc_reset(struct drm_crtc *crtc); struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc); void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state); -int mgag200_crtc_enable_vblank(struct drm_crtc *crtc); -void mgag200_crtc_disable_vblank(struct drm_crtc *crtc); #define MGAG200_CRTC_FUNCS \ .reset = mgag200_crtc_reset, \ @@ -416,10 +409,7 @@ void mgag200_crtc_disable_vblank(struct drm_crtc *crtc); .set_config = drm_atomic_helper_set_config, \ .page_flip = drm_atomic_helper_page_flip, \ .atomic_duplicate_state = mgag200_crtc_atomic_duplicate_state, \ - .atomic_destroy_state = mgag200_crtc_atomic_destroy_state, \ - .enable_vblank = mgag200_crtc_enable_vblank, \ - .disable_vblank = mgag200_crtc_disable_vblank, \ - .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp + .atomic_destroy_state = mgag200_crtc_atomic_destroy_state void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode, bool set_vidrst); diff --git a/drivers/gpu/drm/mgag200/mgag200_g200.c b/drivers/gpu/drm/mgag200/mgag200_g200.c index 77ce8d36cef0..f874e2949840 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200.c @@ -8,7 +8,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -404,9 +403,5 @@ struct mga_device *mgag200_g200_device_create(struct 
pci_dev *pdev, const struct drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh.c b/drivers/gpu/drm/mgag200/mgag200_g200eh.c index 09ced65c1d2f..e2305f8e00f8 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200eh.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200eh.c @@ -8,7 +8,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -276,9 +275,5 @@ struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c index 5daa469137bd..11ae76eb081d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c @@ -7,7 +7,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -181,9 +180,5 @@ struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev, drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c index 09cfffafe130..c20ed0ab50ec 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200er.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c @@ -8,7 +8,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -206,8 +205,6 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_crtc_set_gamma_linear(mdev, format); mgag200_enable_display(mdev); - - drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = { @@ -215,8 +212,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = { .atomic_check = mgag200_crtc_helper_atomic_check, .atomic_flush = mgag200_crtc_helper_atomic_flush, .atomic_enable = mgag200_g200er_crtc_helper_atomic_enable, - .atomic_disable = mgag200_crtc_helper_atomic_disable, - .get_scanout_position = mgag200_crtc_helper_get_scanout_position, + .atomic_disable = mgag200_crtc_helper_atomic_disable }; static const struct drm_crtc_funcs mgag200_g200er_crtc_funcs = { @@ -312,9 +308,5 @@ struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c index 3d48baa91d8b..78be964eb97c 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c @@ -8,7 +8,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -207,8 +206,6 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_crtc_set_gamma_linear(mdev, format); mgag200_enable_display(mdev); - - drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs 
mgag200_g200ev_crtc_helper_funcs = { @@ -216,8 +213,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = { .atomic_check = mgag200_crtc_helper_atomic_check, .atomic_flush = mgag200_crtc_helper_atomic_flush, .atomic_enable = mgag200_g200ev_crtc_helper_atomic_enable, - .atomic_disable = mgag200_crtc_helper_atomic_disable, - .get_scanout_position = mgag200_crtc_helper_get_scanout_position, + .atomic_disable = mgag200_crtc_helper_atomic_disable }; static const struct drm_crtc_funcs mgag200_g200ev_crtc_funcs = { @@ -317,9 +313,5 @@ struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c index dabc778e64e8..31624c9ab7b7 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c @@ -7,7 +7,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -199,9 +198,5 @@ struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev, drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c index 9dcbe8304271..7a32d3b1d226 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200se.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c @@ -8,7 +8,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -338,8 +337,6 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_crtc_set_gamma_linear(mdev, format); mgag200_enable_display(mdev); - - drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = { @@ -347,8 +344,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = { .atomic_check = mgag200_crtc_helper_atomic_check, .atomic_flush = mgag200_crtc_helper_atomic_flush, .atomic_enable = mgag200_g200se_crtc_helper_atomic_enable, - .atomic_disable = mgag200_crtc_helper_atomic_disable, - .get_scanout_position = mgag200_crtc_helper_get_scanout_position, + .atomic_disable = mgag200_crtc_helper_atomic_disable }; static const struct drm_crtc_funcs mgag200_g200se_crtc_funcs = { @@ -517,9 +513,5 @@ struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200wb.c b/drivers/gpu/drm/mgag200/mgag200_g200wb.c index 83a24aedbf2f..a0e7b9ad46cd 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200wb.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200wb.c @@ -8,7 +8,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -323,9 +322,5 @@ struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c 
b/drivers/gpu/drm/mgag200/mgag200_mode.c index 7159909aca1e..fb71658c3117 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -22,7 +22,6 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_panic.h> #include <drm/drm_print.h> -#include <drm/drm_vblank.h> #include "mgag200_ddc.h" #include "mgag200_drv.h" @@ -227,14 +226,7 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod vblkstr = mode->crtc_vblank_start; vblkend = vtotal + 1; - /* - * There's no VBLANK interrupt on Matrox chipsets, so we use - * the VLINE interrupt instead. It triggers when the current - * <linecomp> has been reached. For VBLANK, this is the first - * non-visible line at the bottom of the screen. Therefore, - * keep <linecomp> in sync with <vblkstr>. - */ - linecomp = vblkstr; + linecomp = vdispend; misc = RREG8(MGA_MISC_IN); @@ -645,8 +637,6 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state); struct drm_device *dev = crtc->dev; struct mga_device *mdev = to_mga_device(dev); - struct drm_pending_vblank_event *event; - unsigned long flags; if (crtc_state->enable && crtc_state->color_mgmt_changed) { const struct drm_format_info *format = mgag200_crtc_state->format; @@ -656,18 +646,6 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s else mgag200_crtc_set_gamma_linear(mdev, format); } - - event = crtc->state->event; - if (event) { - crtc->state->event = NULL; - - spin_lock_irqsave(&dev->event_lock, flags); - if (drm_crtc_vblank_get(crtc) != 0) - drm_crtc_send_vblank_event(crtc, event); - else - drm_crtc_arm_vblank_event(crtc, event); - spin_unlock_irqrestore(&dev->event_lock, flags); - } } void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state) @@ -692,44 +670,15 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_ mgag200_crtc_set_gamma_linear(mdev, format); mgag200_enable_display(mdev); - - drm_crtc_vblank_on(crtc); } void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state) { struct mga_device *mdev = to_mga_device(crtc->dev); - drm_crtc_vblank_off(crtc); - mgag200_disable_display(mdev); } -bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq, - int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode) -{ - struct mga_device *mdev = to_mga_device(crtc->dev); - u32 vcount; - - if (stime) - *stime = ktime_get(); - - if (vpos) { - vcount = RREG32(MGAREG_VCOUNT); - *vpos = vcount & GENMASK(11, 0); - } - - if (hpos) - *hpos = mode->htotal >> 1; // near middle of scanline on average - - if (etime) - *etime = ktime_get(); - - return true; -} - void mgag200_crtc_reset(struct drm_crtc *crtc) { struct mgag200_crtc_state *mgag200_crtc_state; @@ -774,30 +723,6 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st kfree(mgag200_crtc_state); } -int mgag200_crtc_enable_vblank(struct drm_crtc *crtc) -{ - struct mga_device *mdev = to_mga_device(crtc->dev); - u32 ien; - - WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR); - - ien = RREG32(MGAREG_IEN); - ien |= MGAREG_IEN_VLINEIEN; - WREG32(MGAREG_IEN, ien); - - return 0; -} - -void mgag200_crtc_disable_vblank(struct drm_crtc *crtc) -{ - struct mga_device *mdev = to_mga_device(crtc->dev); - u32 ien; - - ien = RREG32(MGAREG_IEN); - ien &= 
~(MGAREG_IEN_VLINEIEN); - WREG32(MGAREG_IEN, ien); -} - /* * Mode config */ diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index f16d7c00e089..7ec833b6d829 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -6,6 +6,7 @@ config DRM_MSM depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST depends on COMMON_CLK depends on IOMMU_SUPPORT + depends on OF depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n depends on QCOM_OCMEM || QCOM_OCMEM=n depends on QCOM_LLCC || QCOM_LLCC=n diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 13110fcc46a8..f274d9430cc3 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -23,6 +23,7 @@ adreno-y := \ adreno/a6xx_gpu.o \ adreno/a6xx_gmu.o \ adreno/a6xx_hfi.o \ + adreno/a6xx_preempt.o \ adreno-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \ @@ -210,6 +211,7 @@ DISPLAY_HEADERS = \ generated/mdp4.xml.h \ generated/mdp5.xml.h \ generated/mdp_common.xml.h \ + generated/mdss.xml.h \ generated/sfpb.xml.h $(addprefix $(obj)/,$(adreno-y)): $(addprefix $(obj)/,$(ADRENO_HEADERS)) diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index 0dc255ddf5ce..379a3d346c30 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -22,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: /* ignore if there has not been a ctx switch: */ - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index b46ff49f47cf..b6df115bb567 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -40,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: /* ignore if there has not been a ctx switch: */ - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 8b4cdf95f445..50c490b492f0 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -34,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: /* ignore if there has not been a ctx switch: */ - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index e09044930547..ee89db72e36e 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -77,7 +77,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -132,7 +132,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) unsigned int i, ibs = 0; if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) { - gpu->cur_ctx_seqno = 0; + 
ring->cur_ctx_seqno = 0; a5xx_submit_in_rb(gpu, submit); return; } @@ -171,7 +171,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 7705f8010484..6b91e0bd1514 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -307,7 +307,7 @@ int a5xx_power_init(struct msm_gpu *gpu) else if (adreno_is_a540(adreno_gpu)) a540_lm_setup(gpu); - /* Set up SP/TP power collpase */ + /* Set up SP/TP power collapse */ a5xx_pc_init(gpu); /* Start the GPMU */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c index 0312b6ee0356..0c560e84ad5a 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c @@ -973,6 +973,25 @@ static const struct adreno_info a6xx_gpus[] = { }, .address_space_size = SZ_16G, }, { + .chip_ids = ADRENO_CHIP_IDS(0x06060300), + .family = ADRENO_6XX_GEN4, + .fw = { + [ADRENO_FW_SQE] = "a660_sqe.fw", + [ADRENO_FW_GMU] = "a663_gmu.bin", + }, + .gmem = SZ_1M + SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | + ADRENO_QUIRK_HAS_HW_APRIV, + .init = a6xx_gpu_init, + .a6xx = &(const struct a6xx_info) { + .hwcg = a690_hwcg, + .protect = &a660_protect, + .gmu_cgc_mode = 0x00020200, + .prim_fifo_threshold = 0x00300200, + }, + .address_space_size = SZ_16G, + }, { .chip_ids = ADRENO_CHIP_IDS(0x06030500), .family = ADRENO_6XX_GEN4, .fw = { @@ -1281,6 +1300,28 @@ static const u32 a730_protect_regs[] = { }; DECLARE_ADRENO_PROTECT(a730_protect, 48); +static const uint32_t a7xx_pwrup_reglist_regs[] = { + REG_A6XX_UCHE_TRAP_BASE, + REG_A6XX_UCHE_TRAP_BASE + 1, + REG_A6XX_UCHE_WRITE_THRU_BASE, + REG_A6XX_UCHE_WRITE_THRU_BASE + 1, + REG_A6XX_UCHE_GMEM_RANGE_MIN, + REG_A6XX_UCHE_GMEM_RANGE_MIN + 1, + REG_A6XX_UCHE_GMEM_RANGE_MAX, + REG_A6XX_UCHE_GMEM_RANGE_MAX + 1, + REG_A6XX_UCHE_CACHE_WAYS, + REG_A6XX_UCHE_MODE_CNTL, + REG_A6XX_RB_NC_MODE_CNTL, + REG_A6XX_RB_CMP_DBG_ECO_CNTL, + REG_A7XX_GRAS_NC_MODE_CNTL, + REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, + REG_A6XX_UCHE_GBIF_GX_CONFIG, + REG_A6XX_UCHE_CLIENT_PF, + REG_A6XX_TPL1_DBG_ECO_CNTL1, +}; + +DECLARE_ADRENO_REGLIST_LIST(a7xx_pwrup_reglist); + static const struct adreno_info a7xx_gpus[] = { { .chip_ids = ADRENO_CHIP_IDS(0x07000200), @@ -1315,15 +1356,18 @@ static const struct adreno_info a7xx_gpus[] = { .gmem = SZ_2M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | - ADRENO_QUIRK_HAS_HW_APRIV, + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION, .init = a6xx_gpu_init, .zapfw = "a730_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a730_hwcg, .protect = &a730_protect, + .pwrup_reglist = &a7xx_pwrup_reglist, .gmu_cgc_mode = 0x00020000, }, .address_space_size = SZ_16G, + .preempt_record_size = 2860 * SZ_1K, }, { .chip_ids = ADRENO_CHIP_IDS(0x43050a01), /* "C510v2" */ .family = ADRENO_7XX_GEN2, @@ -1334,16 +1378,19 @@ static const struct adreno_info a7xx_gpus[] = { .gmem = 3 * SZ_1M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | - ADRENO_QUIRK_HAS_HW_APRIV, + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION, .init = 
a6xx_gpu_init, .zapfw = "a740_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a740_hwcg, .protect = &a730_protect, + .pwrup_reglist = &a7xx_pwrup_reglist, .gmu_chipid = 0x7020100, .gmu_cgc_mode = 0x00020202, }, .address_space_size = SZ_16G, + .preempt_record_size = 4192 * SZ_1K, }, { .chip_ids = ADRENO_CHIP_IDS(0x43050c01), /* "C512v2" */ .family = ADRENO_7XX_GEN2, @@ -1354,15 +1401,18 @@ static const struct adreno_info a7xx_gpus[] = { .gmem = 3 * SZ_1M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | - ADRENO_QUIRK_HAS_HW_APRIV, + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION, .init = a6xx_gpu_init, .a6xx = &(const struct a6xx_info) { .hwcg = a740_hwcg, .protect = &a730_protect, + .pwrup_reglist = &a7xx_pwrup_reglist, .gmu_chipid = 0x7050001, .gmu_cgc_mode = 0x00020202, }, .address_space_size = SZ_256G, + .preempt_record_size = 4192 * SZ_1K, }, { .chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */ .family = ADRENO_7XX_GEN3, @@ -1373,15 +1423,18 @@ static const struct adreno_info a7xx_gpus[] = { .gmem = 3 * SZ_1M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | - ADRENO_QUIRK_HAS_HW_APRIV, + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION, .init = a6xx_gpu_init, .zapfw = "gen70900_zap.mbn", .a6xx = &(const struct a6xx_info) { .protect = &a730_protect, + .pwrup_reglist = &a7xx_pwrup_reglist, .gmu_chipid = 0x7090100, .gmu_cgc_mode = 0x00020202, }, .address_space_size = SZ_16G, + .preempt_record_size = 3572 * SZ_1K, } }; DECLARE_ADRENO_GPULIST(a7xx); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 37927bdd6fbe..14db7376c712 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -1522,15 +1522,13 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, irq = platform_get_irq_byname(pdev, name); - ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu); + ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, name, gmu); if (ret) { DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", name, ret); return ret; } - disable_irq(irq); - return irq; } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index 94b6c5cab6f4..b4a79f88ccf4 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -99,6 +99,7 @@ struct a6xx_gmu { struct completion pd_gate; struct qmp *qmp; + struct a6xx_hfi_msg_bw_table *bw_table; }; static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 06cab2c6fd66..019610341df1 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -68,6 +68,8 @@ static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); uint32_t wptr; unsigned long flags; @@ -81,12 +83,17 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) /* Make sure to wrap wptr if we need to */ wptr = get_wptr(ring); - spin_unlock_irqrestore(&ring->preempt_lock, flags); - - /* Make sure everything is posted before making a decision */ - mb(); + /* Update HW if this is the current ring and we are not in preempt*/ + if 
(!a6xx_in_preempt(a6xx_gpu)) { + if (a6xx_gpu->cur_ring == ring) + gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); + else + ring->restore_wptr = true; + } else { + ring->restore_wptr = true; + } - gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); + spin_unlock_irqrestore(&ring->preempt_lock, flags); } static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter, @@ -101,20 +108,30 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter, } static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, - struct msm_ringbuffer *ring, struct msm_file_private *ctx) + struct msm_ringbuffer *ring, struct msm_gem_submit *submit) { bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1; + struct msm_file_private *ctx = submit->queue->ctx; struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; phys_addr_t ttbr; u32 asid; u64 memptr = rbmemptr(ring, ttbr0); - if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno) + if (ctx->seqno == ring->cur_ctx_seqno) return; if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) return; + if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) { + /* Wait for previous submit to complete before continuing: */ + OUT_PKT7(ring, CP_WAIT_TIMESTAMP, 4); + OUT_RING(ring, 0); + OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence))); + OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); + OUT_RING(ring, submit->seqno - 1); + } + if (!sysprof) { if (!adreno_is_a7xx(adreno_gpu)) { /* Turn off protected mode to write to special registers */ @@ -138,12 +155,14 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, /* * Write the new TTBR0 to the memstore. This is good for debugging. + * Needed for preemption. */ - OUT_PKT7(ring, CP_MEM_WRITE, 4); + OUT_PKT7(ring, CP_MEM_WRITE, 5); OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr))); OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr))); OUT_RING(ring, lower_32_bits(ttbr)); - OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr)); + OUT_RING(ring, upper_32_bits(ttbr)); + OUT_RING(ring, ctx->seqno); /* * Sync both threads after switching pagetables and enable BR only @@ -193,7 +212,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) struct msm_ringbuffer *ring = submit->ring; unsigned int i, ibs = 0; - a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); + a6xx_set_pagetable(a6xx_gpu, ring, submit); get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_start)); @@ -219,7 +238,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -268,6 +287,46 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) a6xx_flush(gpu, ring); } +static void a6xx_emit_set_pseudo_reg(struct msm_ringbuffer *ring, + struct a6xx_gpu *a6xx_gpu, struct msm_gpu_submitqueue *queue) +{ + u64 preempt_postamble; + + OUT_PKT7(ring, CP_SET_PSEUDO_REG, 12); + + OUT_RING(ring, SMMU_INFO); + /* don't save SMMU, we write the record from the kernel instead */ + OUT_RING(ring, 0); + OUT_RING(ring, 0); + + /* privileged and non-secure buffer save */ + OUT_RING(ring, NON_SECURE_SAVE_ADDR); + OUT_RING(ring, lower_32_bits( + a6xx_gpu->preempt_iova[ring->id])); + OUT_RING(ring, upper_32_bits( + a6xx_gpu->preempt_iova[ring->id])); + + /* user context buffer save, seems to be unused by fw
*/ + OUT_RING(ring, NON_PRIV_SAVE_ADDR); + OUT_RING(ring, 0); + OUT_RING(ring, 0); + + OUT_RING(ring, COUNTER); + /* seems OK to set to 0 to disable it */ + OUT_RING(ring, 0); + OUT_RING(ring, 0); + + /* Emit postamble to clear perfcounters */ + preempt_postamble = a6xx_gpu->preempt_postamble_iova; + + OUT_PKT7(ring, CP_SET_AMBLE, 3); + OUT_RING(ring, lower_32_bits(preempt_postamble)); + OUT_RING(ring, upper_32_bits(preempt_postamble)); + OUT_RING(ring, CP_SET_AMBLE_2_DWORDS( + a6xx_gpu->preempt_postamble_len) | + CP_SET_AMBLE_2_TYPE(KMD_AMBLE_TYPE)); +} + static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; @@ -283,7 +342,14 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_THREAD_CONTROL, 1); OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR); - a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); + a6xx_set_pagetable(a6xx_gpu, ring, submit); + + /* + * If preemption is enabled, then set the pseudo register for the save + * sequence + */ + if (gpu->nr_rings > 1) + a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, submit->queue); get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_start)); @@ -296,8 +362,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_SET_MARKER, 1); OUT_RING(ring, 0x101); /* IFPC disable */ - OUT_PKT7(ring, CP_SET_MARKER, 1); - OUT_RING(ring, 0x00d); /* IB1LIST start */ + if (submit->queue->flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) { + OUT_PKT7(ring, CP_SET_MARKER, 1); + OUT_RING(ring, 0x00d); /* IB1LIST start */ + } /* Submit the commands */ for (i = 0; i < submit->nr_cmds; i++) { @@ -305,7 +373,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -328,8 +396,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) update_shadow_rptr(gpu, ring); } - OUT_PKT7(ring, CP_SET_MARKER, 1); - OUT_RING(ring, 0x00e); /* IB1LIST end */ + if (submit->queue->flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) { + OUT_PKT7(ring, CP_SET_MARKER, 1); + OUT_RING(ring, 0x00e); /* IB1LIST end */ + } get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_end)); @@ -376,6 +446,8 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence))); OUT_RING(ring, submit->seqno); + a6xx_gpu->last_seqno[ring->id] = submit->seqno; + /* write the ringbuffer timestamp */ OUT_PKT7(ring, CP_EVENT_WRITE, 4); OUT_RING(ring, CACHE_CLEAN | CP_EVENT_WRITE_0_IRQ | BIT(27)); @@ -389,10 +461,32 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_SET_MARKER, 1); OUT_RING(ring, 0x100); /* IFPC enable */ + /* If preemption is enabled */ + if (gpu->nr_rings > 1) { + /* Yield the floor on command completion */ + OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4); + + /* + * If dword[2:1] are non zero, they specify an address for + * the CP to write the value of dword[3] to on preemption + * complete. 
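 * (Hedged aside, not part of this patch: a caller that wanted an explicit
 * completion marker could pass a real address in dword[2:1], e.g.
 *
 *	OUT_RING(ring, lower_32_bits(marker_iova));
 *	OUT_RING(ring, upper_32_bits(marker_iova));
 *	OUT_RING(ring, 0x1);
 *
 * where marker_iova is purely illustrative; the CP would then write the
 * 0x1 there once the preemption completes.)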
Write 0 to skip the write + */ + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + /* Data value - not used if the address above is 0 */ + OUT_RING(ring, 0x01); + /* generate interrupt on preemption completion */ + OUT_RING(ring, 0x00); + } + + + trace_msm_gpu_submit_flush(submit, gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER)); a6xx_flush(gpu, ring); + + /* Check to see if we need to start preemption */ + a6xx_preempt_trigger(gpu); } static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state) @@ -541,6 +635,15 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) gpu->ubwc_config.macrotile_mode = 1; } + if (adreno_is_a663(gpu)) { + gpu->ubwc_config.highest_bank_bit = 13; + gpu->ubwc_config.amsbc = 1; + gpu->ubwc_config.rgb565_predicator = 1; + gpu->ubwc_config.uavflagprd_inv = 2; + gpu->ubwc_config.macrotile_mode = 1; + gpu->ubwc_config.ubwc_swizzle = 0x4; + } + if (adreno_is_7c3(gpu)) { gpu->ubwc_config.highest_bank_bit = 14; gpu->ubwc_config.amsbc = 1; @@ -599,6 +702,77 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu) adreno_gpu->ubwc_config.macrotile_mode); } +static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + const struct adreno_reglist_list *reglist; + void *ptr = a6xx_gpu->pwrup_reglist_ptr; + struct cpu_gpu_lock *lock = ptr; + u32 *dest = (u32 *)&lock->regs[0]; + int i; + + reglist = adreno_gpu->info->a6xx->pwrup_reglist; + + lock->gpu_req = lock->cpu_req = lock->turn = 0; + lock->ifpc_list_len = 0; + lock->preemption_list_len = reglist->count; + + /* + * For each entry in each of the lists, write the offset and the current + * register value into the GPU buffer + */ + for (i = 0; i < reglist->count; i++) { + *dest++ = reglist->regs[i]; + *dest++ = gpu_read(gpu, reglist->regs[i]); + } + + /* + * The overall register list is composed of + * 1. Static IFPC-only registers + * 2. Static IFPC + preemption registers + * 3. Dynamic IFPC + preemption registers (ex: perfcounter selects) + * + * The first two lists are static. Their sizes are stored as the + * number of pairs in ifpc_list_len and preemption_list_len + * respectively. With concurrent binning, some of the perfcounter + * registers are virtualized, so the CP needs to know the pipe id to + * program the aperture in order to restore them. Thus, the third list + * is a dynamic list of triplets, + * (<aperture, shifted 12 bits> <address> <data>), and its length is + * stored as the number of triplets in dynamic_list_len. + */ + lock->dynamic_list_len = 0; +} + +static int a7xx_preempt_start(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct msm_ringbuffer *ring = gpu->rb[0]; + + if (gpu->nr_rings <= 1) + return 0; + + /* Turn CP protection off */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 0); + + a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, NULL); + + /* Yield the floor on command completion */ + OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4); + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + /* Generate interrupt on preemption completion */ + OUT_RING(ring, 0x00); + + a6xx_flush(gpu, ring); + + return a6xx_idle(gpu, ring) ?
0 : -EINVAL; +} + static int a6xx_cp_init(struct msm_gpu *gpu) { struct msm_ringbuffer *ring = gpu->rb[0]; @@ -630,6 +804,8 @@ static int a6xx_cp_init(struct msm_gpu *gpu) static int a7xx_cp_init(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct msm_ringbuffer *ring = gpu->rb[0]; u32 mask; @@ -667,11 +843,11 @@ static int a7xx_cp_init(struct msm_gpu *gpu) /* *Don't* send a power up reg list for concurrent binning (TODO) */ /* Lo address */ - OUT_RING(ring, 0x00000000); + OUT_RING(ring, lower_32_bits(a6xx_gpu->pwrup_reglist_iova)); /* Hi address */ - OUT_RING(ring, 0x00000000); + OUT_RING(ring, upper_32_bits(a6xx_gpu->pwrup_reglist_iova)); /* BIT(31) set => read the regs from the list */ - OUT_RING(ring, 0x00000000); + OUT_RING(ring, BIT(31)); a6xx_flush(gpu, ring); return a6xx_idle(gpu, ring) ? 0 : -EINVAL; @@ -795,6 +971,16 @@ static int a6xx_ucode_load(struct msm_gpu *gpu) msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow"); } + a6xx_gpu->pwrup_reglist_ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE, + MSM_BO_WC | MSM_BO_MAP_PRIV, + gpu->aspace, &a6xx_gpu->pwrup_reglist_bo, + &a6xx_gpu->pwrup_reglist_iova); + + if (IS_ERR(a6xx_gpu->pwrup_reglist_ptr)) + return PTR_ERR(a6xx_gpu->pwrup_reglist_ptr); + + msm_gem_object_set_name(a6xx_gpu->pwrup_reglist_bo, "pwrup_reglist"); + return 0; } @@ -854,6 +1040,7 @@ static int hw_init(struct msm_gpu *gpu) struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct a6xx_gmu *gmu = &a6xx_gpu->gmu; u64 gmem_range_min; + unsigned int i; int ret; if (!adreno_has_gmu_wrapper(adreno_gpu)) { @@ -1062,7 +1249,7 @@ static int hw_init(struct msm_gpu *gpu) if (adreno_is_a690(adreno_gpu)) gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x90); /* Set dualQ + disable afull for A660 GPU */ - else if (adreno_is_a660(adreno_gpu)) + else if (adreno_is_a660(adreno_gpu) || adreno_is_a663(adreno_gpu)) gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906); else if (adreno_is_a7xx(adreno_gpu)) gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, @@ -1124,22 +1311,32 @@ static int hw_init(struct msm_gpu *gpu) if (a6xx_gpu->shadow_bo) { gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR, shadowptr(a6xx_gpu, gpu->rb[0])); + for (unsigned int i = 0; i < gpu->nr_rings; i++) + a6xx_gpu->shadow[i] = 0; } /* ..which means "always" on A7xx, also for BV shadow */ if (adreno_is_a7xx(adreno_gpu)) { gpu_write64(gpu, REG_A7XX_CP_BV_RB_RPTR_ADDR, - rbmemptr(gpu->rb[0], bv_fence)); + rbmemptr(gpu->rb[0], bv_rptr)); } + a6xx_preempt_hw_init(gpu); + /* Always come up on rb 0 */ a6xx_gpu->cur_ring = gpu->rb[0]; - gpu->cur_ctx_seqno = 0; + for (i = 0; i < gpu->nr_rings; i++) + gpu->rb[i]->cur_ctx_seqno = 0; /* Enable the SQE_to start the CP engine */ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1); + if (adreno_is_a7xx(adreno_gpu) && !a6xx_gpu->pwrup_reglist_emitted) { + a7xx_patch_pwrup_reglist(gpu); + a6xx_gpu->pwrup_reglist_emitted = true; + } + ret = adreno_is_a7xx(adreno_gpu) ? 
a7xx_cp_init(gpu) : a6xx_cp_init(gpu); if (ret) goto out; @@ -1177,6 +1374,10 @@ static int hw_init(struct msm_gpu *gpu) out: if (adreno_has_gmu_wrapper(adreno_gpu)) return ret; + + /* Last step - yield the ringbuffer */ + a7xx_preempt_start(gpu); + /* * Tell the GMU that we are done touching the GPU and it can start power * management @@ -1554,8 +1755,13 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu) if (status & A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION) a7xx_sw_fuse_violation_irq(gpu); - if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) + if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) { msm_gpu_retire(gpu); + a6xx_preempt_trigger(gpu); + } + + if (status & A6XX_RBBM_INT_0_MASK_CP_SW) + a6xx_preempt_irq(gpu); return IRQ_HANDLED; } @@ -2249,6 +2455,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) struct a6xx_gpu *a6xx_gpu; struct adreno_gpu *adreno_gpu; struct msm_gpu *gpu; + extern int enable_preemption; bool is_a7xx; int ret; @@ -2287,7 +2494,10 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) return ERR_PTR(ret); } - if (is_a7xx) + if ((enable_preemption == 1) || (enable_preemption == -1 && + (config->info->quirks & ADRENO_QUIRK_PREEMPTION))) + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 4); + else if (is_a7xx) ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1); else if (adreno_has_gmu_wrapper(adreno_gpu)) ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1); @@ -2328,6 +2538,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) a6xx_fault_handler); a6xx_calc_ubwc_config(adreno_gpu); + /* Set up the preemption specific bits and pieces for each ringbuffer */ + a6xx_preempt_init(gpu); return gpu; } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 0fb7febf70e7..4aceffb6aae8 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -12,15 +12,35 @@ extern bool hang_debug; +struct cpu_gpu_lock { + uint32_t gpu_req; + uint32_t cpu_req; + uint32_t turn; + union { + struct { + uint16_t list_length; + uint16_t list_offset; + }; + struct { + uint8_t ifpc_list_len; + uint8_t preemption_list_len; + uint16_t dynamic_list_len; + }; + }; + uint64_t regs[62]; +}; + /** * struct a6xx_info - a6xx specific information from device table * * @hwcg: hw clock gating register sequence * @protect: CP_PROTECT settings + * @pwrup_reglist pwrup reglist for preemption */ struct a6xx_info { const struct adreno_reglist *hwcg; const struct adreno_protect *protect; + const struct adreno_reglist_list *pwrup_reglist; u32 gmu_chipid; u32 gmu_cgc_mode; u32 prim_fifo_threshold; @@ -33,6 +53,29 @@ struct a6xx_gpu { uint64_t sqe_iova; struct msm_ringbuffer *cur_ring; + struct msm_ringbuffer *next_ring; + + struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS]; + void *preempt[MSM_GPU_MAX_RINGS]; + uint64_t preempt_iova[MSM_GPU_MAX_RINGS]; + struct drm_gem_object *preempt_smmu_bo[MSM_GPU_MAX_RINGS]; + void *preempt_smmu[MSM_GPU_MAX_RINGS]; + uint64_t preempt_smmu_iova[MSM_GPU_MAX_RINGS]; + uint32_t last_seqno[MSM_GPU_MAX_RINGS]; + + atomic_t preempt_state; + spinlock_t eval_lock; + struct timer_list preempt_timer; + + unsigned int preempt_level; + bool uses_gmem; + bool skip_save_restore; + + struct drm_gem_object *preempt_postamble_bo; + void *preempt_postamble_ptr; + uint64_t preempt_postamble_iova; + uint64_t preempt_postamble_len; + bool postamble_enabled; struct a6xx_gmu gmu; @@ -40,6 +83,11 @@ struct a6xx_gpu { uint64_t shadow_iova; uint32_t *shadow; + struct 
drm_gem_object *pwrup_reglist_bo; + void *pwrup_reglist_ptr; + uint64_t pwrup_reglist_iova; + bool pwrup_reglist_emitted; + bool has_whereami; void __iomem *llc_mmio; @@ -52,6 +100,100 @@ struct a6xx_gpu { #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base) /* + * In order to do lockless preemption we use a simple state machine to progress + * through the process. + * + * PREEMPT_NONE - no preemption in progress. Next state START. + * PREEMPT_START - The trigger is evaluating if preemption is possible. Next + * states: TRIGGERED, NONE + * PREEMPT_FINISH - An intermediate state before moving back to NONE. Next + * state: NONE. + * PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next + * states: FAULTED, PENDING + * PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger + * recovery. Next state: N/A + * PREEMPT_PENDING: Preemption complete interrupt fired - the callback is + * checking the success of the operation. Next state: FAULTED, NONE. + */ + +enum a6xx_preempt_state { + PREEMPT_NONE = 0, + PREEMPT_START, + PREEMPT_FINISH, + PREEMPT_TRIGGERED, + PREEMPT_FAULTED, + PREEMPT_PENDING, +}; + +/* + * struct a6xx_preempt_record is a shared buffer between the microcode and the + * CPU to store the state for preemption. The record itself is much larger + * (2112k) but most of that is used by the CP for storage. + * + * There is a preemption record assigned per ringbuffer. When the CPU triggers a + * preemption, it fills out the record with the useful information (wptr, ring + * base, etc) and the microcode uses that information to set up the CP following + * the preemption. When a ring is switched out, the CP will save the ringbuffer + * state back to the record. In this way, once the records are properly set up + * the CPU can quickly switch back and forth between ringbuffers by only + * updating a few registers (often only the wptr). + * + * These are the CPU aware registers in the record: + * @magic: Must always be 0xAE399D6EUL + * @info: Type of the record - written 0 by the CPU, updated by the CP + * @errno: preemption error record + * @data: Data field in YIELD and SET_MARKER packets, Written and used by CP + * @cntl: Value of RB_CNTL written by CPU, save/restored by CP + * @rptr: Value of RB_RPTR written by CPU, save/restored by CP + * @wptr: Value of RB_WPTR written by CPU, save/restored by CP + * @_pad: Reserved/padding + * @rptr_addr: Value of RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP + * @rbase: Value of RB_BASE written by CPU, save/restored by CP + * @counter: GPU address of the storage area for the preemption counters + * @bv_rptr_addr: Value of BV_RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP + */ +struct a6xx_preempt_record { + u32 magic; + u32 info; + u32 errno; + u32 data; + u32 cntl; + u32 rptr; + u32 wptr; + u32 _pad; + u64 rptr_addr; + u64 rbase; + u64 counter; + u64 bv_rptr_addr; +}; + +#define A6XX_PREEMPT_RECORD_MAGIC 0xAE399D6EUL + +#define PREEMPT_SMMU_INFO_SIZE 4096 + +#define PREEMPT_RECORD_SIZE(adreno_gpu) \ + ((adreno_gpu->info->preempt_record_size) == 0 ? \ + 4192 * SZ_1K : (adreno_gpu->info->preempt_record_size)) + +/* + * The preemption counter block is a storage area for the value of the + * preemption counters that are saved immediately before context switch. We + * append it on to the end of the allocation for the preemption record. 
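 * (Hedged sketch: under that layout the counter area for ring i would start
 * at
 *
 *	u64 counter_iova = a6xx_gpu->preempt_iova[i] +
 *			   PREEMPT_RECORD_SIZE(adreno_gpu);
 *
 * which is where @counter could point; note this patch still initialises
 * record->counter to 0, so the address above is illustrative only.)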
+ */ +#define A6XX_PREEMPT_COUNTER_SIZE (16 * 4) + +struct a7xx_cp_smmu_info { + u32 magic; + u32 _pad4; + u64 ttbr0; + u32 asid; + u32 context_idr; + u32 context_bank; +}; + +#define GEN7_CP_SMMU_INFO_MAGIC 0x241350d5UL + +/* * Given a register and a count, return a value to program into * REG_CP_PROTECT_REG(n) - this will block both reads and writes for * _len + 1 registers starting at _reg. @@ -108,6 +250,34 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node); int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node); void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu); +void a6xx_preempt_init(struct msm_gpu *gpu); +void a6xx_preempt_hw_init(struct msm_gpu *gpu); +void a6xx_preempt_trigger(struct msm_gpu *gpu); +void a6xx_preempt_irq(struct msm_gpu *gpu); +void a6xx_preempt_fini(struct msm_gpu *gpu); +int a6xx_preempt_submitqueue_setup(struct msm_gpu *gpu, + struct msm_gpu_submitqueue *queue); +void a6xx_preempt_submitqueue_close(struct msm_gpu *gpu, + struct msm_gpu_submitqueue *queue); + +/* Return true if we are in a preempt state */ +static inline bool a6xx_in_preempt(struct a6xx_gpu *a6xx_gpu) +{ + /* + * Make sure the read to preempt_state is ordered with respect to reads + * of other variables before ... + */ + smp_rmb(); + + int preempt_state = atomic_read(&a6xx_gpu->preempt_state); + + /* ... and after. */ + smp_rmb(); + + return !(preempt_state == PREEMPT_NONE || + preempt_state == PREEMPT_FINISH); +} + void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, bool suspended); unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c index cdb3f6e74d3e..cb8844ed46b2 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c @@ -478,6 +478,37 @@ static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) msg->cnoc_cmds_data[1][0] = 0x60000001; } +static void a663_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) +{ + /* + * Send a single "off" entry just to get things running + * TODO: bus scaling + */ + msg->bw_level_num = 1; + + msg->ddr_cmds_num = 3; + msg->ddr_wait_bitmask = 0x07; + + msg->ddr_cmds_addrs[0] = 0x50004; + msg->ddr_cmds_addrs[1] = 0x50000; + msg->ddr_cmds_addrs[2] = 0x500b4; + + msg->ddr_cmds_data[0][0] = 0x40000000; + msg->ddr_cmds_data[0][1] = 0x40000000; + msg->ddr_cmds_data[0][2] = 0x40000000; + + /* + * These are the CX (CNOC) votes - these are used by the GMU but the + * votes are known and fixed for the target + */ + msg->cnoc_cmds_num = 1; + msg->cnoc_wait_bitmask = 0x01; + + msg->cnoc_cmds_addrs[0] = 0x50058; + msg->cnoc_cmds_data[0][0] = 0x40000000; + msg->cnoc_cmds_data[1][0] = 0x60000001; +} + static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) { /* @@ -630,32 +661,44 @@ static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) { - struct a6xx_hfi_msg_bw_table msg = { 0 }; + struct a6xx_hfi_msg_bw_table *msg; struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + if (gmu->bw_table) + goto send; + + msg = devm_kzalloc(gmu->dev, sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + if (adreno_is_a618(adreno_gpu)) - a618_build_bw_table(&msg); + a618_build_bw_table(msg); else if (adreno_is_a619(adreno_gpu)) - a619_build_bw_table(&msg); + a619_build_bw_table(msg); else if (adreno_is_a640_family(adreno_gpu)) - 
a640_build_bw_table(&msg); + a640_build_bw_table(msg); else if (adreno_is_a650(adreno_gpu)) - a650_build_bw_table(&msg); + a650_build_bw_table(msg); else if (adreno_is_7c3(adreno_gpu)) - adreno_7c3_build_bw_table(&msg); + adreno_7c3_build_bw_table(msg); else if (adreno_is_a660(adreno_gpu)) - a660_build_bw_table(&msg); + a660_build_bw_table(msg); + else if (adreno_is_a663(adreno_gpu)) + a663_build_bw_table(msg); else if (adreno_is_a690(adreno_gpu)) - a690_build_bw_table(&msg); + a690_build_bw_table(msg); else if (adreno_is_a730(adreno_gpu)) - a730_build_bw_table(&msg); + a730_build_bw_table(msg); else if (adreno_is_a740_family(adreno_gpu)) - a740_build_bw_table(&msg); + a740_build_bw_table(msg); else - a6xx_build_bw_table(&msg); + a6xx_build_bw_table(msg); + + gmu->bw_table = msg; - return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg), +send: + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, gmu->bw_table, sizeof(*(gmu->bw_table)), NULL, 0); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c new file mode 100644 index 000000000000..2fd4e39f618f --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c @@ -0,0 +1,456 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2023 Collabora, Ltd. */ +/* Copyright (c) 2024 Valve Corporation */ + +#include "msm_gem.h" +#include "a6xx_gpu.h" +#include "a6xx_gmu.xml.h" +#include "msm_mmu.h" +#include "msm_gpu_trace.h" + +/* + * Try to transition the preemption state from old to new. Return + * true on success or false if the original state wasn't 'old' + */ +static inline bool try_preempt_state(struct a6xx_gpu *a6xx_gpu, + enum a6xx_preempt_state old, enum a6xx_preempt_state new) +{ + enum a6xx_preempt_state cur = atomic_cmpxchg(&a6xx_gpu->preempt_state, + old, new); + + return (cur == old); +} + +/* + * Force the preemption state to the specified state. This is used in cases + * where the current state is known and won't change + */ +static inline void set_preempt_state(struct a6xx_gpu *gpu, + enum a6xx_preempt_state new) +{ + /* + * preempt_state may be read by other cores trying to trigger a + * preemption or in the interrupt handler so barriers are needed + * before... + */ + smp_mb__before_atomic(); + atomic_set(&gpu->preempt_state, new); + /* ... 
and after*/ + smp_mb__after_atomic(); +} + +/* Write the most recent wptr for the given ring into the hardware */ +static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + unsigned long flags; + uint32_t wptr; + + spin_lock_irqsave(&ring->preempt_lock, flags); + + if (ring->restore_wptr) { + wptr = get_wptr(ring); + + gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); + + ring->restore_wptr = false; + } + + spin_unlock_irqrestore(&ring->preempt_lock, flags); +} + +/* Return the highest priority ringbuffer with something in it */ +static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + unsigned long flags; + int i; + + for (i = 0; i < gpu->nr_rings; i++) { + bool empty; + struct msm_ringbuffer *ring = gpu->rb[i]; + + spin_lock_irqsave(&ring->preempt_lock, flags); + empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); + if (!empty && ring == a6xx_gpu->cur_ring) + empty = ring->memptrs->fence == a6xx_gpu->last_seqno[i]; + spin_unlock_irqrestore(&ring->preempt_lock, flags); + + if (!empty) + return ring; + } + + return NULL; +} + +static void a6xx_preempt_timer(struct timer_list *t) +{ + struct a6xx_gpu *a6xx_gpu = from_timer(a6xx_gpu, t, preempt_timer); + struct msm_gpu *gpu = &a6xx_gpu->base.base; + struct drm_device *dev = gpu->dev; + + if (!try_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED)) + return; + + dev_err(dev->dev, "%s: preemption timed out\n", gpu->name); + kthread_queue_work(gpu->worker, &gpu->recover_work); +} + +static void preempt_prepare_postamble(struct a6xx_gpu *a6xx_gpu) +{ + u32 *postamble = a6xx_gpu->preempt_postamble_ptr; + u32 count = 0; + + postamble[count++] = PKT7(CP_REG_RMW, 3); + postamble[count++] = REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD; + postamble[count++] = 0; + postamble[count++] = 1; + + postamble[count++] = PKT7(CP_WAIT_REG_MEM, 6); + postamble[count++] = CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ); + postamble[count++] = CP_WAIT_REG_MEM_1_POLL_ADDR_LO( + REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS); + postamble[count++] = CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0); + postamble[count++] = CP_WAIT_REG_MEM_3_REF(0x1); + postamble[count++] = CP_WAIT_REG_MEM_4_MASK(0x1); + postamble[count++] = CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0); + + a6xx_gpu->preempt_postamble_len = count; + + a6xx_gpu->postamble_enabled = true; +} + +static void preempt_disable_postamble(struct a6xx_gpu *a6xx_gpu) +{ + u32 *postamble = a6xx_gpu->preempt_postamble_ptr; + + /* + * Disable the postamble by replacing the first packet header with a NOP + * that covers the whole buffer. + */ + *postamble = PKT7(CP_NOP, (a6xx_gpu->preempt_postamble_len - 1)); + + a6xx_gpu->postamble_enabled = false; +} + +void a6xx_preempt_irq(struct msm_gpu *gpu) +{ + uint32_t status; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct drm_device *dev = gpu->dev; + + if (!try_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING)) + return; + + /* Delete the preemption watchdog timer */ + del_timer(&a6xx_gpu->preempt_timer); + + /* + * The hardware should be setting the stop bit of CP_CONTEXT_SWITCH_CNTL + * to zero before firing the interrupt, but there is a non zero chance + * of a hardware condition or a software race that could set it again + * before we have a chance to finish. 
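 * (For context, a hedged cross-reference: the trigger path in this file
 * sets the stop bit itself before kicking the CP,
 *
 *	cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_STOP;
 *	gpu_write(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl);
 *
 * so reading the bit back as still set here means the CP never cleared it,
 * i.e. the switch never completed.)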
If that happens, log and go for + * recovery + */ + status = gpu_read(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL); + if (unlikely(status & A6XX_CP_CONTEXT_SWITCH_CNTL_STOP)) { + DRM_DEV_ERROR(&gpu->pdev->dev, + "!!!!!!!!!!!!!!!! preemption faulted !!!!!!!!!!!!!! irq\n"); + set_preempt_state(a6xx_gpu, PREEMPT_FAULTED); + dev_err(dev->dev, "%s: Preemption failed to complete\n", + gpu->name); + kthread_queue_work(gpu->worker, &gpu->recover_work); + return; + } + + a6xx_gpu->cur_ring = a6xx_gpu->next_ring; + a6xx_gpu->next_ring = NULL; + + set_preempt_state(a6xx_gpu, PREEMPT_FINISH); + + update_wptr(gpu, a6xx_gpu->cur_ring); + + set_preempt_state(a6xx_gpu, PREEMPT_NONE); + + trace_msm_gpu_preemption_irq(a6xx_gpu->cur_ring->id); + + /* + * Retrigger preemption to avoid a deadlock that might occur when preemption + * is skipped due to it being already in flight when requested. + */ + a6xx_preempt_trigger(gpu); +} + +void a6xx_preempt_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int i; + + /* No preemption if we only have one ring */ + if (gpu->nr_rings == 1) + return; + + for (i = 0; i < gpu->nr_rings; i++) { + struct a6xx_preempt_record *record_ptr = a6xx_gpu->preempt[i]; + + record_ptr->wptr = 0; + record_ptr->rptr = 0; + record_ptr->rptr_addr = shadowptr(a6xx_gpu, gpu->rb[i]); + record_ptr->info = 0; + record_ptr->data = 0; + record_ptr->rbase = gpu->rb[i]->iova; + } + + /* Write a 0 to signal that we aren't switching pagetables */ + gpu_write64(gpu, REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, 0); + + /* Enable the GMEM save/restore feature for preemption */ + gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x1); + + /* Reset the preemption state */ + set_preempt_state(a6xx_gpu, PREEMPT_NONE); + + spin_lock_init(&a6xx_gpu->eval_lock); + + /* Always come up on rb 0 */ + a6xx_gpu->cur_ring = gpu->rb[0]; +} + +void a6xx_preempt_trigger(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + unsigned long flags; + struct msm_ringbuffer *ring; + unsigned int cntl; + bool sysprof; + + if (gpu->nr_rings == 1) + return; + + /* + * Lock to make sure another thread attempting preemption doesn't skip it + * while we are still evaluating the next ring. This makes sure the other + * thread does start preemption if we abort it and avoids a soft lock. + */ + spin_lock_irqsave(&a6xx_gpu->eval_lock, flags); + + /* + * Try to start preemption by moving from NONE to START. 
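 * (Hedged illustration of the cmpxchg semantics: if two CPUs race here,
 * only one of them observes PREEMPT_NONE,
 *
 *	CPU0: try_preempt_state(NONE, START) == true  -> proceeds
 *	CPU1: try_preempt_state(NONE, START) == false -> returns early
 *
 * and the loser relies on a6xx_preempt_irq() re-running the trigger.)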
If + * unsuccessful, a preemption is already in flight + */ + if (!try_preempt_state(a6xx_gpu, PREEMPT_NONE, PREEMPT_START)) { + spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags); + return; + } + + cntl = A6XX_CP_CONTEXT_SWITCH_CNTL_LEVEL(a6xx_gpu->preempt_level); + + if (a6xx_gpu->skip_save_restore) + cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_SKIP_SAVE_RESTORE; + + if (a6xx_gpu->uses_gmem) + cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_USES_GMEM; + + cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_STOP; + + /* Get the next ring to preempt to */ + ring = get_next_ring(gpu); + + /* + * If no ring is populated or the highest priority ring is the current + * one do nothing except to update the wptr to the latest and greatest + */ + if (!ring || (a6xx_gpu->cur_ring == ring)) { + set_preempt_state(a6xx_gpu, PREEMPT_FINISH); + update_wptr(gpu, a6xx_gpu->cur_ring); + set_preempt_state(a6xx_gpu, PREEMPT_NONE); + spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags); + return; + } + + spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags); + + spin_lock_irqsave(&ring->preempt_lock, flags); + + struct a7xx_cp_smmu_info *smmu_info_ptr = + a6xx_gpu->preempt_smmu[ring->id]; + struct a6xx_preempt_record *record_ptr = a6xx_gpu->preempt[ring->id]; + u64 ttbr0 = ring->memptrs->ttbr0; + u32 context_idr = ring->memptrs->context_idr; + + smmu_info_ptr->ttbr0 = ttbr0; + smmu_info_ptr->context_idr = context_idr; + record_ptr->wptr = get_wptr(ring); + + /* + * The GPU will write the wptr we set above when we preempt. Reset + * restore_wptr to make sure that we don't write WPTR to the same + * thing twice. It's still possible subsequent submissions will update + * wptr again, in which case they will set the flag to true. This has + * to be protected by the lock for setting the flag and updating wptr + * to be atomic. 
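 * (For reference, a hedged paraphrase of the producer side in a6xx_flush()
 * earlier in this patch, which takes the same per-ring lock:
 *
 *	spin_lock_irqsave(&ring->preempt_lock, flags);
 *	if (!a6xx_in_preempt(a6xx_gpu) && a6xx_gpu->cur_ring == ring)
 *		gpu_write(gpu, REG_A6XX_CP_RB_WPTR, get_wptr(ring));
 *	else
 *		ring->restore_wptr = true;
 *	spin_unlock_irqrestore(&ring->preempt_lock, flags);
 * )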
+ */ + ring->restore_wptr = false; + + trace_msm_gpu_preemption_trigger(a6xx_gpu->cur_ring->id, ring->id); + + spin_unlock_irqrestore(&ring->preempt_lock, flags); + + gpu_write64(gpu, + REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, + a6xx_gpu->preempt_smmu_iova[ring->id]); + + gpu_write64(gpu, + REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR, + a6xx_gpu->preempt_iova[ring->id]); + + a6xx_gpu->next_ring = ring; + + /* Start a timer to catch a stuck preemption */ + mod_timer(&a6xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000)); + + /* Enable or disable postamble as needed */ + sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1; + + if (!sysprof && !a6xx_gpu->postamble_enabled) + preempt_prepare_postamble(a6xx_gpu); + + if (sysprof && a6xx_gpu->postamble_enabled) + preempt_disable_postamble(a6xx_gpu); + + /* Set the preemption state to triggered */ + set_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED); + + /* Trigger the preemption */ + gpu_write(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl); +} + +static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu, + struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + struct drm_gem_object *bo = NULL; + phys_addr_t ttbr; + u64 iova = 0; + void *ptr; + int asid; + + ptr = msm_gem_kernel_new(gpu->dev, + PREEMPT_RECORD_SIZE(adreno_gpu), + MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova); + + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + memset(ptr, 0, PREEMPT_RECORD_SIZE(adreno_gpu)); + + msm_gem_object_set_name(bo, "preempt_record ring%d", ring->id); + + a6xx_gpu->preempt_bo[ring->id] = bo; + a6xx_gpu->preempt_iova[ring->id] = iova; + a6xx_gpu->preempt[ring->id] = ptr; + + struct a6xx_preempt_record *record_ptr = ptr; + + ptr = msm_gem_kernel_new(gpu->dev, + PREEMPT_SMMU_INFO_SIZE, + MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY, + gpu->aspace, &bo, &iova); + + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + memset(ptr, 0, PREEMPT_SMMU_INFO_SIZE); + + msm_gem_object_set_name(bo, "preempt_smmu_info ring%d", ring->id); + + a6xx_gpu->preempt_smmu_bo[ring->id] = bo; + a6xx_gpu->preempt_smmu_iova[ring->id] = iova; + a6xx_gpu->preempt_smmu[ring->id] = ptr; + + struct a7xx_cp_smmu_info *smmu_info_ptr = ptr; + + msm_iommu_pagetable_params(gpu->aspace->mmu, &ttbr, &asid); + + smmu_info_ptr->magic = GEN7_CP_SMMU_INFO_MAGIC; + smmu_info_ptr->ttbr0 = ttbr; + smmu_info_ptr->asid = 0xdecafbad; + smmu_info_ptr->context_idr = 0; + + /* Set up the defaults on the preemption record */ + record_ptr->magic = A6XX_PREEMPT_RECORD_MAGIC; + record_ptr->info = 0; + record_ptr->data = 0; + record_ptr->rptr = 0; + record_ptr->wptr = 0; + record_ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT; + record_ptr->rbase = ring->iova; + record_ptr->counter = 0; + record_ptr->bv_rptr_addr = rbmemptr(ring, bv_rptr); + + return 0; +} + +void a6xx_preempt_fini(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int i; + + for (i = 0; i < gpu->nr_rings; i++) + msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->aspace); +} + +void a6xx_preempt_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int i; + + /* No preemption if we only have one ring */ + if (gpu->nr_rings <= 1) + return; + + for (i = 0; i < gpu->nr_rings; i++) { + if (preempt_init_ring(a6xx_gpu, gpu->rb[i])) + goto fail; + } + + /* TODO: make this configurable? 
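 * (Hedged sketch of one way to honour this TODO, mirroring the existing
 * enable_preemption knob this series adds to adreno_device.c; the
 * parameter name is illustrative only:
 *
 *	static int preempt_level = 1;
 *	module_param(preempt_level, int, 0600);
 *	MODULE_PARM_DESC(preempt_level,
 *		"CP preemption level (0=ringbuffer boundary, 1=finer grained)");
 * )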
*/ + a6xx_gpu->preempt_level = 1; + a6xx_gpu->uses_gmem = 1; + a6xx_gpu->skip_save_restore = 1; + + a6xx_gpu->preempt_postamble_ptr = msm_gem_kernel_new(gpu->dev, + PAGE_SIZE, + MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY, + gpu->aspace, &a6xx_gpu->preempt_postamble_bo, + &a6xx_gpu->preempt_postamble_iova); + + preempt_prepare_postamble(a6xx_gpu); + + if (IS_ERR(a6xx_gpu->preempt_postamble_ptr)) + goto fail; + + timer_setup(&a6xx_gpu->preempt_timer, a6xx_preempt_timer, 0); + + return; +fail: + /* + * On any failure our adventure is over. Clean up and + * set nr_rings to 1 to force preemption off + */ + a6xx_preempt_fini(gpu); + gpu->nr_rings = 1; + + DRM_DEV_ERROR(&gpu->pdev->dev, + "preemption init failed, disabling preemption\n"); + + return; +} diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index cfc74a9e2646..236b25c094cd 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -20,6 +20,10 @@ bool allow_vram_carveout = false; MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU"); module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600); +int enable_preemption = -1; +MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on , 0=disable, -1=auto (default))"); +module_param(enable_preemption, int, 0600); + extern const struct adreno_gpulist a2xx_gpulist; extern const struct adreno_gpulist a3xx_gpulist; extern const struct adreno_gpulist a4xx_gpulist; @@ -389,7 +393,7 @@ static const struct dev_pm_ops adreno_pm_ops = { static struct platform_driver adreno_driver = { .probe = adreno_probe, - .remove_new = adreno_remove, + .remove = adreno_remove, .shutdown = adreno_shutdown, .driver = { .name = "adreno", diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 465a4cd14a43..75f5367e73ca 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -533,7 +533,7 @@ int adreno_load_fw(struct adreno_gpu *adreno_gpu) if (!adreno_gpu->info->fw[i]) continue; - /* Skip loading GMU firwmare with GMU Wrapper */ + /* Skip loading GMU firmware with GMU Wrapper */ if (adreno_has_gmu_wrapper(adreno_gpu) && i == ADRENO_FW_GMU) continue; @@ -572,8 +572,19 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, int adreno_hw_init(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int ret; + VERB("%s", gpu->name); + if (adreno_gpu->info->family >= ADRENO_6XX_GEN1 && + qcom_scm_set_gpu_smmu_aperture_is_available()) { + /* We currently always use context bank 0, so hard code this */ + ret = qcom_scm_set_gpu_smmu_aperture(0); + if (ret) + DRM_DEV_ERROR(gpu->dev->dev, "unable to set SMMU aperture: %d\n", ret); + } + for (int i = 0; i < gpu->nr_rings; i++) { struct msm_ringbuffer *ring = gpu->rb[i]; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 58d7e7915c57..e71f420f8b3a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -56,6 +56,7 @@ enum adreno_family { #define ADRENO_QUIRK_LMLOADKILL_DISABLE BIT(2) #define ADRENO_QUIRK_HAS_HW_APRIV BIT(3) #define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4) +#define ADRENO_QUIRK_PREEMPTION BIT(5) /* Helper for formating the chip_id in the way that userspace tools like * crashdec expect. @@ -111,6 +112,7 @@ struct adreno_info { * {SHRT_MAX, 0} sentinal. 
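 * (Hedged sketch of how such a table is typically walked; the names are
 * illustrative:
 *
 *	for (sb = info->speedbins; sb->fuse != SHRT_MAX; sb++)
 *		if (sb->fuse == fuse)
 *			return BIT(sb->speedbin);
 *
 * the sentinel bounds the loop since the table carries no explicit count.)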
*/ struct adreno_speedbin *speedbins; + u64 preempt_record_size; }; #define ADRENO_CHIP_IDS(tbl...) (uint32_t[]) { tbl, 0 } @@ -156,6 +158,19 @@ static const struct adreno_protect name = { \ .count_max = __count_max, \ }; +struct adreno_reglist_list { + /** @reg: List of register **/ + const u32 *regs; + /** @count: Number of registers in the list **/ + u32 count; +}; + +#define DECLARE_ADRENO_REGLIST_LIST(name) \ +static const struct adreno_reglist_list name = { \ + .regs = name ## _regs, \ + .count = ARRAY_SIZE(name ## _regs), \ +}; + struct adreno_gpu { struct msm_gpu base; const struct adreno_info *info; @@ -455,6 +470,11 @@ static inline int adreno_is_a680(const struct adreno_gpu *gpu) return adreno_is_revn(gpu, 680); } +static inline int adreno_is_a663(const struct adreno_gpu *gpu) +{ + return gpu->info->chip_ids[0] == 0x06060300; +} + static inline int adreno_is_a690(const struct adreno_gpu *gpu) { return gpu->info->chip_ids[0] == 0x06090000; @@ -656,12 +676,15 @@ OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) OUT_RING(ring, PKT4(regindx, cnt)); } +#define PKT7(opcode, cnt) \ + (CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) | \ + ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23)) + static inline void OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) { adreno_wait_ring(ring, cnt + 1); - OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) | - ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23)); + OUT_RING(ring, PKT7(opcode, cnt)); } struct msm_gpu *a2xx_gpu_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h new file mode 100644 index 000000000000..ab3dfb0b374e --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef _DPU_1_14_MSM8937_H +#define _DPU_1_14_MSM8937_H + +static const struct dpu_caps msm8937_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_LINE_WIDTH, + .max_mixer_blendstages = 0x4, + .max_linewidth = DEFAULT_DPU_LINE_WIDTH, + .pixel_ram_size = 40 * 1024, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_mdp_cfg msm8937_mdp[] = { + { + .name = "top_0", + .base = 0x0, .len = 0x454, + .features = BIT(DPU_MDP_VSYNC_SEL), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 }, + }, + }, +}; + +static const struct dpu_ctl_cfg msm8937_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x64, + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x64, + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x64, + }, +}; + +static const struct dpu_sspp_cfg msm8937_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x150, + .features = VIG_MSM8953_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_4", .id = SSPP_RGB0, + .base = 0x14000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 1, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB0, + }, { + .name = "sspp_5", .id = 
SSPP_RGB1, + .base = 0x16000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 5, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB1, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x150, + .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR), + .sblk = &dpu_dma_sblk, + .xin_id = 2, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, +}; + +static const struct dpu_lm_cfg msm8937_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + }, +}; + +static const struct dpu_pingpong_cfg msm8937_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x70000, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x70800, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), + }, +}; + +static const struct dpu_dspp_cfg msm8937_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, +}; + +static const struct dpu_intf_cfg msm8937_intf[] = { + { + .name = "intf_1", .id = INTF_1, + .base = 0x6a800, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x6b000, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = -1, + }, +}; + +static const struct dpu_perf_cfg msm8937_perf_data = { + .max_bw_low = 3100000, + .max_bw_high = 3100000, + .min_core_ib = 2400000, + .min_llcc_ib = 0, /* No LLCC on this SoC */ + .min_dram_ib = 800000, + .undersized_prefill_lines = 2, + .xtra_prefill_lines = 2, + .dest_scale_prefill_lines = 3, + .macrotile_prefill_lines = 4, + .yuv_nv12_prefill_lines = 8, + .linear_prefill_lines = 1, + .downscaling_prefill_lines = 1, + .amortizable_threshold = 25, + .min_prefill_lines = 14, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfffc, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(msm8998_qos_linear), + .entries = msm8998_qos_linear + }, + {.nentry = ARRAY_SIZE(msm8998_qos_macrotile), + .entries = msm8998_qos_macrotile + }, + {.nentry = ARRAY_SIZE(msm8998_qos_nrt), + .entries = msm8998_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version msm8937_mdss_ver = { + .core_major_ver = 1, + .core_minor_ver = 14, +}; + +const struct dpu_mdss_cfg dpu_msm8937_cfg = { + .mdss_ver = &msm8937_mdss_ver, + .caps = &msm8937_dpu_caps, + .mdp = msm8937_mdp, + .ctl_count = ARRAY_SIZE(msm8937_ctl), + .ctl = msm8937_ctl, + .sspp_count = 
ARRAY_SIZE(msm8937_sspp), + .sspp = msm8937_sspp, + .mixer_count = ARRAY_SIZE(msm8937_lm), + .mixer = msm8937_lm, + .dspp_count = ARRAY_SIZE(msm8937_dspp), + .dspp = msm8937_dspp, + .pingpong_count = ARRAY_SIZE(msm8937_pp), + .pingpong = msm8937_pp, + .intf_count = ARRAY_SIZE(msm8937_intf), + .intf = msm8937_intf, + .vbif_count = ARRAY_SIZE(msm8996_vbif), + .vbif = msm8996_vbif, + .perf = &msm8937_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h new file mode 100644 index 000000000000..6bdaecca6761 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef _DPU_1_15_MSM8917_H +#define _DPU_1_15_MSM8917_H + +static const struct dpu_caps msm8917_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_LINE_WIDTH, + .max_mixer_blendstages = 0x4, + .max_linewidth = DEFAULT_DPU_LINE_WIDTH, + .pixel_ram_size = 16 * 1024, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_mdp_cfg msm8917_mdp[] = { + { + .name = "top_0", + .base = 0x0, .len = 0x454, + .features = BIT(DPU_MDP_VSYNC_SEL), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 }, + }, + }, +}; + +static const struct dpu_ctl_cfg msm8917_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x64, + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x64, + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x64, + }, +}; + +static const struct dpu_sspp_cfg msm8917_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x150, + .features = VIG_MSM8953_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_4", .id = SSPP_RGB0, + .base = 0x14000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 1, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB0, + }, { + .name = "sspp_5", .id = SSPP_RGB1, + .base = 0x16000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 5, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB1, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x150, + .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR), + .sblk = &dpu_dma_sblk, + .xin_id = 2, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, +}; + +static const struct dpu_lm_cfg msm8917_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, +}; + +static const struct dpu_pingpong_cfg msm8917_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x70000, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + }, +}; + +static const struct dpu_dspp_cfg msm8917_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, +}; + +static const struct 
dpu_intf_cfg msm8917_intf[] = { + { + .name = "intf_1", .id = INTF_1, + .base = 0x6a800, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = -1, + }, +}; + +static const struct dpu_perf_cfg msm8917_perf_data = { + .max_bw_low = 1800000, + .max_bw_high = 1800000, + .min_core_ib = 2400000, + .min_llcc_ib = 0, /* No LLCC on this SoC */ + .min_dram_ib = 800000, + .undersized_prefill_lines = 2, + .xtra_prefill_lines = 2, + .dest_scale_prefill_lines = 3, + .macrotile_prefill_lines = 4, + .yuv_nv12_prefill_lines = 8, + .linear_prefill_lines = 1, + .downscaling_prefill_lines = 1, + .amortizable_threshold = 25, + .min_prefill_lines = 21, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfffc, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(msm8998_qos_linear), + .entries = msm8998_qos_linear + }, + {.nentry = ARRAY_SIZE(msm8998_qos_macrotile), + .entries = msm8998_qos_macrotile + }, + {.nentry = ARRAY_SIZE(msm8998_qos_nrt), + .entries = msm8998_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version msm8917_mdss_ver = { + .core_major_ver = 1, + .core_minor_ver = 15, +}; + +const struct dpu_mdss_cfg dpu_msm8917_cfg = { + .mdss_ver = &msm8917_mdss_ver, + .caps = &msm8917_dpu_caps, + .mdp = msm8917_mdp, + .ctl_count = ARRAY_SIZE(msm8917_ctl), + .ctl = msm8917_ctl, + .sspp_count = ARRAY_SIZE(msm8917_sspp), + .sspp = msm8917_sspp, + .mixer_count = ARRAY_SIZE(msm8917_lm), + .mixer = msm8917_lm, + .dspp_count = ARRAY_SIZE(msm8917_dspp), + .dspp = msm8917_dspp, + .pingpong_count = ARRAY_SIZE(msm8917_pp), + .pingpong = msm8917_pp, + .intf_count = ARRAY_SIZE(msm8917_intf), + .intf = msm8917_intf, + .vbif_count = ARRAY_SIZE(msm8996_vbif), + .vbif = msm8996_vbif, + .perf = &msm8917_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h new file mode 100644 index 000000000000..14f36ea6ad0e --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef _DPU_1_16_MSM8953_H +#define _DPU_1_16_MSM8953_H + +static const struct dpu_caps msm8953_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_LINE_WIDTH, + .max_mixer_blendstages = 0x4, + .max_linewidth = DEFAULT_DPU_LINE_WIDTH, + .pixel_ram_size = 40 * 1024, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_mdp_cfg msm8953_mdp[] = { + { + .name = "top_0", + .base = 0x0, .len = 0x454, + .features = BIT(DPU_MDP_VSYNC_SEL), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 }, + }, + }, +}; + +static const struct dpu_ctl_cfg msm8953_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x64, + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x64, + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x64, + }, 
+}; + +static const struct dpu_sspp_cfg msm8953_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x150, + .features = VIG_MSM8953_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_4", .id = SSPP_RGB0, + .base = 0x14000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 1, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB0, + }, { + .name = "sspp_5", .id = SSPP_RGB1, + .base = 0x16000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 5, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB1, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x150, + .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR), + .sblk = &dpu_dma_sblk, + .xin_id = 2, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, +}; + +static const struct dpu_lm_cfg msm8953_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + }, +}; + +static const struct dpu_pingpong_cfg msm8953_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x70000, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x70800, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), + }, +}; + +static const struct dpu_dspp_cfg msm8953_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, +}; + +static const struct dpu_intf_cfg msm8953_intf[] = { + { + .name = "intf_0", .id = INTF_0, + .base = 0x6a000, .len = 0x268, + .type = INTF_NONE, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_1", .id = INTF_1, + .base = 0x6a800, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x6b000, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = -1, + }, +}; + +static const struct dpu_perf_cfg msm8953_perf_data = { + .max_bw_low = 3400000, + .max_bw_high = 3400000, + .min_core_ib = 2400000, + .min_llcc_ib = 0, /* No LLCC on this SoC */ + .min_dram_ib = 800000, + .undersized_prefill_lines = 2, + .xtra_prefill_lines = 2, + .dest_scale_prefill_lines = 3, + .macrotile_prefill_lines = 4, + .yuv_nv12_prefill_lines = 8, + .linear_prefill_lines = 1, + .downscaling_prefill_lines = 1, + .amortizable_threshold = 25, + .min_prefill_lines = 14, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfffc, 0xff00, 0xffff}, + 
.qos_lut_tbl = { + {.nentry = ARRAY_SIZE(msm8998_qos_linear), + .entries = msm8998_qos_linear + }, + {.nentry = ARRAY_SIZE(msm8998_qos_macrotile), + .entries = msm8998_qos_macrotile + }, + {.nentry = ARRAY_SIZE(msm8998_qos_nrt), + .entries = msm8998_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version msm8953_mdss_ver = { + .core_major_ver = 1, + .core_minor_ver = 16, +}; + +const struct dpu_mdss_cfg dpu_msm8953_cfg = { + .mdss_ver = &msm8953_mdss_ver, + .caps = &msm8953_dpu_caps, + .mdp = msm8953_mdp, + .ctl_count = ARRAY_SIZE(msm8953_ctl), + .ctl = msm8953_ctl, + .sspp_count = ARRAY_SIZE(msm8953_sspp), + .sspp = msm8953_sspp, + .mixer_count = ARRAY_SIZE(msm8953_lm), + .mixer = msm8953_lm, + .dspp_count = ARRAY_SIZE(msm8953_dspp), + .dspp = msm8953_dspp, + .pingpong_count = ARRAY_SIZE(msm8953_pp), + .pingpong = msm8953_pp, + .intf_count = ARRAY_SIZE(msm8953_intf), + .intf = msm8953_intf, + .vbif_count = ARRAY_SIZE(msm8996_vbif), + .vbif = msm8996_vbif, + .perf = &msm8953_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h new file mode 100644 index 000000000000..491f6f5827d1 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h @@ -0,0 +1,338 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Linaro Limited + * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved. + */ + +#ifndef _DPU_1_7_MSM8996_H +#define _DPU_1_7_MSM8996_H + +static const struct dpu_caps msm8996_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0x7, + .has_src_split = true, + .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_mdp_cfg msm8996_mdp[] = { + { + .name = "top_0", + .base = 0x0, .len = 0x454, + .features = BIT(DPU_MDP_VSYNC_SEL), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 }, + [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB2] = { .reg_off = 0x2bc, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB3] = { .reg_off = 0x2c4, .bit_off = 4 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, + [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 }, + [DPU_CLK_CTRL_CURSOR1] = { .reg_off = 0x3b0, .bit_off = 16 }, + }, + }, +}; + +static const struct dpu_ctl_cfg msm8996_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x64, + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x64, + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x64, + }, { + .name = "ctl_3", .id = CTL_3, + .base = 0x1600, .len = 0x64, + }, { + .name = "ctl_4", .id = CTL_4, + .base = 0x1800, .len = 0x64, + }, +}; + +static const struct dpu_sspp_cfg msm8996_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x150, + .features = 
VIG_MSM8996_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_1", .id = SSPP_VIG1, + .base = 0x6000, .len = 0x150, + .features = VIG_MSM8996_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 4, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG1, + }, { + .name = "sspp_2", .id = SSPP_VIG2, + .base = 0x8000, .len = 0x150, + .features = VIG_MSM8996_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 8, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG2, + }, { + .name = "sspp_3", .id = SSPP_VIG3, + .base = 0xa000, .len = 0x150, + .features = VIG_MSM8996_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 12, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG3, + }, { + .name = "sspp_4", .id = SSPP_RGB0, + .base = 0x14000, .len = 0x150, + .features = RGB_MSM8996_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 1, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB0, + }, { + .name = "sspp_5", .id = SSPP_RGB1, + .base = 0x16000, .len = 0x150, + .features = RGB_MSM8996_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 5, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB1, + }, { + .name = "sspp_6", .id = SSPP_RGB2, + .base = 0x18000, .len = 0x150, + .features = RGB_MSM8996_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 9, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB2, + }, { + .name = "sspp_7", .id = SSPP_RGB3, + .base = 0x1a000, .len = 0x150, + .features = RGB_MSM8996_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 13, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB3, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x150, + .features = DMA_MSM8996_MASK, + .sblk = &dpu_dma_sblk, + .xin_id = 2, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, { + .name = "sspp_9", .id = SSPP_DMA1, + .base = 0x26000, .len = 0x150, + .features = DMA_MSM8996_MASK, + .sblk = &dpu_dma_sblk, + .xin_id = 10, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA1, + }, +}; + +static const struct dpu_lm_cfg msm8996_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x320, + .features = MIXER_MSM8998_MASK, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x320, + .features = MIXER_MSM8998_MASK, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + .dspp = DSPP_1, + }, { + .name = "lm_2", .id = LM_2, + .base = 0x46000, .len = 0x320, + .features = MIXER_MSM8998_MASK, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_5, + .pingpong = PINGPONG_2, + }, { + .name = "lm_5", .id = LM_5, + .base = 0x49000, .len = 0x320, + .features = MIXER_MSM8998_MASK, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_2, + .pingpong = PINGPONG_3, + }, +}; + +static const struct dpu_pingpong_cfg msm8996_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x70000, .len = 0xd4, + .features = PINGPONG_MSM8996_TE2_MASK, + .sblk = &msm8996_pp_sblk_te, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x70800, .len = 0xd4, + .features = PINGPONG_MSM8996_TE2_MASK, + .sblk = &msm8996_pp_sblk_te, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), + }, { + .name = "pingpong_2", .id = PINGPONG_2, + .base = 0x71000, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = 
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14), + }, { + .name = "pingpong_3", .id = PINGPONG_3, + .base = 0x71800, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15), + }, +}; + +static const struct dpu_dsc_cfg msm8996_dsc[] = { + { + .name = "dsc_0", .id = DSC_0, + .base = 0x80000, .len = 0x140, + }, { + .name = "dsc_1", .id = DSC_1, + .base = 0x80400, .len = 0x140, + }, +}; + +static const struct dpu_dspp_cfg msm8996_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, { + .name = "dspp_1", .id = DSPP_1, + .base = 0x56000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, +}; + +static const struct dpu_intf_cfg msm8996_intf[] = { + { + .name = "intf_0", .id = INTF_0, + .base = 0x6a000, .len = 0x268, + .type = INTF_NONE, + .prog_fetch_lines_worst_case = 25, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_1", .id = INTF_1, + .base = 0x6a800, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 25, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x6b000, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 25, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_3", .id = INTF_3, + .base = 0x6b800, .len = 0x268, + .type = INTF_HDMI, + .prog_fetch_lines_worst_case = 25, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), + .intr_tear_rd_ptr = -1, + }, +}; + +static const struct dpu_perf_cfg msm8996_perf_data = { + .max_bw_low = 9600000, + .max_bw_high = 9600000, + .min_core_ib = 2400000, + .min_llcc_ib = 0, /* No LLCC on this SoC */ + .min_dram_ib = 800000, + .undersized_prefill_lines = 2, + .xtra_prefill_lines = 2, + .dest_scale_prefill_lines = 3, + .macrotile_prefill_lines = 4, + .yuv_nv12_prefill_lines = 8, + .linear_prefill_lines = 1, + .downscaling_prefill_lines = 1, + .amortizable_threshold = 25, + .min_prefill_lines = 21, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfffc, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(msm8998_qos_linear), + .entries = msm8998_qos_linear + }, + {.nentry = ARRAY_SIZE(msm8998_qos_macrotile), + .entries = msm8998_qos_macrotile + }, + {.nentry = ARRAY_SIZE(msm8998_qos_nrt), + .entries = msm8998_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version msm8996_mdss_ver = { + .core_major_ver = 1, + .core_minor_ver = 7, +}; + +const struct dpu_mdss_cfg dpu_msm8996_cfg = { + .mdss_ver = &msm8996_mdss_ver, + .caps = &msm8996_dpu_caps, + .mdp = msm8996_mdp, + .ctl_count = ARRAY_SIZE(msm8996_ctl), + .ctl = msm8996_ctl, + .sspp_count = ARRAY_SIZE(msm8996_sspp), + .sspp = msm8996_sspp, + .mixer_count = ARRAY_SIZE(msm8996_lm), + .mixer = msm8996_lm, + .dspp_count = 
ARRAY_SIZE(msm8996_dspp), + .dspp = msm8996_dspp, + .pingpong_count = ARRAY_SIZE(msm8996_pp), + .pingpong = msm8996_pp, + .dsc_count = ARRAY_SIZE(msm8996_dsc), + .dsc = msm8996_dsc, + .intf_count = ARRAY_SIZE(msm8996_intf), + .intf = msm8996_intf, + .vbif_count = ARRAY_SIZE(msm8996_vbif), + .vbif = msm8996_vbif, + .perf = &msm8996_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h index 1d3e9666c741..64c94e919a69 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h @@ -157,18 +157,6 @@ static const struct dpu_lm_cfg msm8998_lm[] = { .lm_pair = LM_5, .pingpong = PINGPONG_2, }, { - .name = "lm_3", .id = LM_3, - .base = 0x47000, .len = 0x320, - .features = MIXER_MSM8998_MASK, - .sblk = &msm8998_lm_sblk, - .pingpong = PINGPONG_NONE, - }, { - .name = "lm_4", .id = LM_4, - .base = 0x48000, .len = 0x320, - .features = MIXER_MSM8998_MASK, - .sblk = &msm8998_lm_sblk, - .pingpong = PINGPONG_NONE, - }, { .name = "lm_5", .id = LM_5, .base = 0x49000, .len = 0x320, .features = MIXER_MSM8998_MASK, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h index 7a23389a5732..72bd4f7e9e50 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h @@ -156,25 +156,13 @@ static const struct dpu_lm_cfg sdm845_lm[] = { .pingpong = PINGPONG_2, .dspp = DSPP_2, }, { - .name = "lm_3", .id = LM_3, - .base = 0x0, .len = 0x320, - .features = MIXER_SDM845_MASK, - .sblk = &sdm845_lm_sblk, - .pingpong = PINGPONG_NONE, - .dspp = DSPP_3, - }, { - .name = "lm_4", .id = LM_4, - .base = 0x0, .len = 0x320, - .features = MIXER_SDM845_MASK, - .sblk = &sdm845_lm_sblk, - .pingpong = PINGPONG_NONE, - }, { .name = "lm_5", .id = LM_5, .base = 0x49000, .len = 0x320, .features = MIXER_SDM845_MASK, .sblk = &sdm845_lm_sblk, .lm_pair = LM_2, .pingpong = PINGPONG_3, + .dspp = DSPP_3, }, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h new file mode 100644 index 000000000000..907b4d7ceb47 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h @@ -0,0 +1,485 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DPU_8_4_SA8775P_H +#define _DPU_8_4_SA8775P_H + +static const struct dpu_caps sa8775p_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0xb, + .has_src_split = true, + .has_dim_layer = true, + .has_idle_pc = true, + .has_3d_merge = true, + .max_linewidth = 5120, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, +}; + +static const struct dpu_mdp_cfg sa8775p_mdp = { + .name = "top_0", + .base = 0x0, .len = 0x494, + .features = BIT(DPU_MDP_PERIPH_0_REMOVED), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, + [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 }, + [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 }, + [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 }, + [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 }, + }, +}; + +/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */ +static const struct dpu_ctl_cfg sa8775p_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x15000, .len = 0x204, + .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x16000, .len = 0x204, + .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x17000, .len = 0x204, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), + }, { + .name = "ctl_3", .id = CTL_3, + .base = 0x18000, .len = 0x204, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12), + }, { + .name = "ctl_4", .id = CTL_4, + .base = 0x19000, .len = 0x204, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13), + }, { + .name = "ctl_5", .id = CTL_5, + .base = 0x1a000, .len = 0x204, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23), + }, +}; + +static const struct dpu_sspp_cfg sa8775p_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x32c, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_1, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_1", .id = SSPP_VIG1, + .base = 0x6000, .len = 0x32c, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_1, + .xin_id = 4, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG1, + }, { + .name = "sspp_2", .id = SSPP_VIG2, + .base = 0x8000, .len = 0x32c, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_1, + .xin_id = 8, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG2, + }, { + .name = "sspp_3", .id = SSPP_VIG3, + .base = 0xa000, .len = 0x32c, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_1, + .xin_id = 12, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG3, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x32c, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 1, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, { + .name = "sspp_9", .id = SSPP_DMA1, + .base = 0x26000, .len = 0x32c, + .features = DMA_SDM845_MASK_SDMA, + 
.sblk = &dpu_dma_sblk, + .xin_id = 5, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA1, + }, { + .name = "sspp_10", .id = SSPP_DMA2, + .base = 0x28000, .len = 0x32c, + .features = DMA_CURSOR_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 9, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA2, + }, { + .name = "sspp_11", .id = SSPP_DMA3, + .base = 0x2a000, .len = 0x32c, + .features = DMA_CURSOR_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 13, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA3, + }, +}; + +static const struct dpu_lm_cfg sa8775p_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + .dspp = DSPP_1, + }, { + .name = "lm_2", .id = LM_2, + .base = 0x46000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_3, + .pingpong = PINGPONG_2, + .dspp = DSPP_2, + }, { + .name = "lm_3", .id = LM_3, + .base = 0x47000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_2, + .pingpong = PINGPONG_3, + .dspp = DSPP_3, + }, { + .name = "lm_4", .id = LM_4, + .base = 0x48000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_5, + .pingpong = PINGPONG_4, + }, { + .name = "lm_5", .id = LM_5, + .base = 0x49000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_4, + .pingpong = PINGPONG_5, + }, +}; + +static const struct dpu_dspp_cfg sa8775p_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_1", .id = DSPP_1, + .base = 0x56000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_2", .id = DSPP_2, + .base = 0x58000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_3", .id = DSPP_3, + .base = 0x5a000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, +}; + +static const struct dpu_pingpong_cfg sa8775p_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x69000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_0, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x6a000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_0, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + }, { + .name = "pingpong_2", .id = PINGPONG_2, + .base = 0x6b000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_1, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), + }, { + .name = "pingpong_3", .id = PINGPONG_3, + .base = 0x6c000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_1, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), + }, { + .name = "pingpong_4", .id = PINGPONG_4, + .base = 0x6d000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_2, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), + }, { + .name = "pingpong_5", .id = PINGPONG_5, + .base = 
0x6e000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_2, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), + }, { + .name = "pingpong_6", .id = PINGPONG_6, + .base = 0x65800, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_3, + }, { + .name = "pingpong_7", .id = PINGPONG_7, + .base = 0x65c00, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_3, + }, +}; + +static const struct dpu_merge_3d_cfg sa8775p_merge_3d[] = { + { + .name = "merge_3d_0", .id = MERGE_3D_0, + .base = 0x4e000, .len = 0x8, + }, { + .name = "merge_3d_1", .id = MERGE_3D_1, + .base = 0x4f000, .len = 0x8, + }, { + .name = "merge_3d_2", .id = MERGE_3D_2, + .base = 0x50000, .len = 0x8, + }, { + .name = "merge_3d_3", .id = MERGE_3D_3, + .base = 0x65f00, .len = 0x8, + }, +}; + +/* + * NOTE: Each display compression engine (DCE) contains dual hard + * slice DSC encoders, so both share the same base address but have + * their own distinct sub-block addresses. + */ +static const struct dpu_dsc_cfg sa8775p_dsc[] = { + { + .name = "dce_0_0", .id = DSC_0, + .base = 0x80000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_0, + }, { + .name = "dce_0_1", .id = DSC_1, + .base = 0x80000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_1, + }, { + .name = "dce_1_0", .id = DSC_2, + .base = 0x81000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &dsc_sblk_0, + }, { + .name = "dce_1_1", .id = DSC_3, + .base = 0x81000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &dsc_sblk_1, + }, { + .name = "dce_2_0", .id = DSC_4, + .base = 0x82000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_0, + }, { + .name = "dce_2_1", .id = DSC_5, + .base = 0x82000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_1, + }, +}; + +static const struct dpu_wb_cfg sa8775p_wb[] = { + { + .name = "wb_2", .id = WB_2, + .base = 0x65000, .len = 0x2c8, + .features = WB_SM8250_MASK, + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), + .clk_ctrl = DPU_CLK_CTRL_WB2, + .xin_id = 6, + .vbif_idx = VBIF_RT, + .maxlinewidth = 4096, + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), + }, +}; + +/* TODO: INTF 3, 6, 7 and 8 are used for MST, marked as INTF_NONE for now */ +static const struct dpu_intf_cfg sa8775p_intf[] = { + { + .name = "intf_0", .id = INTF_0, + .base = 0x34000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_0, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), + }, { + .name = "intf_1", .id = INTF_1, + .base = 0x35000, .len = 0x300, + .features = INTF_SC7280_MASK, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2), + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x36000, .len = 0x300, + .features = INTF_SC7280_MASK, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = 
DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2), + }, { + .name = "intf_3", .id = INTF_3, + .base = 0x37000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), + }, { + .name = "intf_4", .id = INTF_4, + .base = 0x38000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_1, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21), + }, { + .name = "intf_6", .id = INTF_6, + .base = 0x3A000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16), + }, { + .name = "intf_7", .id = INTF_7, + .base = 0x3b000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19), + }, { + .name = "intf_8", .id = INTF_8, + .base = 0x3c000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), + }, +}; + +static const struct dpu_perf_cfg sa8775p_perf_data = { + .max_bw_low = 13600000, + .max_bw_high = 18200000, + .min_core_ib = 2500000, + .min_llcc_ib = 0, + .min_dram_ib = 800000, + .min_prefill_lines = 35, + /* FIXME: lut tables */ + .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0}, + .safe_lut_tbl = {0xfff0, 0xfff0, 0x1}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile), + .entries = sm6350_qos_linear_macrotile + }, + {.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile), + .entries = sm6350_qos_linear_macrotile + }, + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), + .entries = sc7180_qos_nrt + }, + /* TODO: macrotile-qseed is different from macrotile */ + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version sa8775p_mdss_ver = { + .core_major_ver = 8, + .core_minor_ver = 4, +}; + +const struct dpu_mdss_cfg dpu_sa8775p_cfg = { + .mdss_ver = &sa8775p_mdss_ver, + .caps = &sa8775p_dpu_caps, + .mdp = &sa8775p_mdp, + .cdm = &sc7280_cdm, + .ctl_count = ARRAY_SIZE(sa8775p_ctl), + .ctl = sa8775p_ctl, + .sspp_count = ARRAY_SIZE(sa8775p_sspp), + .sspp = sa8775p_sspp, + .mixer_count = ARRAY_SIZE(sa8775p_lm), + .mixer = sa8775p_lm, + .dspp_count = ARRAY_SIZE(sa8775p_dspp), + .dspp = sa8775p_dspp, + .pingpong_count = ARRAY_SIZE(sa8775p_pp), + .pingpong = sa8775p_pp, + .dsc_count = ARRAY_SIZE(sa8775p_dsc), + .dsc = sa8775p_dsc, + .merge_3d_count = ARRAY_SIZE(sa8775p_merge_3d), + .merge_3d = sa8775p_merge_3d, + .wb_count = ARRAY_SIZE(sa8775p_wb), + .wb = sa8775p_wb, + .intf_count = ARRAY_SIZE(sa8775p_intf), + .intf = sa8775p_intf, + .vbif_count = ARRAY_SIZE(sdm845_vbif), + .vbif = sdm845_vbif, + .perf = &sa8775p_perf_data, +}; + +#endif 
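The per-SoC catalog headers added above all follow the same convention: immutable block tables (ctl, sspp, mixer, pingpong, intf, ...) whose lengths travel with them as ARRAY_SIZE()-derived counts inside a single dpu_mdss_cfg. Below is a minimal sketch of how a consumer can walk such a table; the helper dpu_catalog_find_sspp is hypothetical and not part of this patch, and it assumes only the dpu_mdss_cfg and dpu_sspp_cfg fields visible in these hunks (sspp, sspp_count, xin_id).

/*
 * Illustrative only (not in this patch): look up the SSPP catalog
 * entry that owns a given VBIF client id (xin_id), relying on the
 * array/count pairing that every dpu_mdss_cfg carries.
 */
static const struct dpu_sspp_cfg *
dpu_catalog_find_sspp(const struct dpu_mdss_cfg *cfg, u32 xin_id)
{
	unsigned int i;

	for (i = 0; i < cfg->sspp_count; i++) {
		if (cfg->sspp[i].xin_id == xin_id)
			return &cfg->sspp[i];
	}

	return NULL;	/* no pipe uses this VBIF client id */
}

Because the counts are computed with ARRAY_SIZE() at the definition site, a small SoC such as MSM8917 can simply provide shorter tables (one mixer, one pingpong) and a consumer like this needs no per-SoC special cases or sentinel entries.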
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h index 7c286bafb948..e7183cf05776 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h @@ -8,72 +8,26 @@ #include "dpu_kms.h" #include "dpu_hw_interrupts.h" -/** - * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler - * @kms: MSM KMS handle - * @return: none - */ void dpu_core_irq_preinstall(struct msm_kms *kms); -/** - * dpu_core_irq_uninstall - uninstall core IRQ handler - * @kms: MSM KMS handle - * @return: none - */ void dpu_core_irq_uninstall(struct msm_kms *kms); -/** - * dpu_core_irq - core IRQ handler - * @kms: MSM KMS handle - * @return: interrupt handling status - */ irqreturn_t dpu_core_irq(struct msm_kms *kms); -/** - * dpu_core_irq_read - IRQ helper function for reading IRQ status - * @dpu_kms: DPU handle - * @irq_idx: irq index - * @return: non-zero if irq detected; otherwise no irq detected - */ u32 dpu_core_irq_read( struct dpu_kms *dpu_kms, unsigned int irq_idx); -/** - * dpu_core_irq_register_callback - For registering callback function on IRQ - * interrupt - * @dpu_kms: DPU handle - * @irq_idx: irq index - * @irq_cb: IRQ callback funcion. - * @irq_arg: IRQ callback argument. - * @return: 0 for success registering callback, otherwise failure - * - * This function supports registration of multiple callbacks for each interrupt. - */ int dpu_core_irq_register_callback( struct dpu_kms *dpu_kms, unsigned int irq_idx, void (*irq_cb)(void *arg), void *irq_arg); -/** - * dpu_core_irq_unregister_callback - For unregistering callback function on IRQ - * interrupt - * @dpu_kms: DPU handle - * @irq_idx: irq index - * @return: 0 for success registering callback, otherwise failure - * - * This function supports registration of multiple callbacks for each interrupt. 
- */ int dpu_core_irq_unregister_callback( struct dpu_kms *dpu_kms, unsigned int irq_idx); -/** - * dpu_debugfs_core_irq_init - register core irq debugfs - * @dpu_kms: pointer to kms - * @parent: debugfs directory root - */ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, struct dentry *parent); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c index 68fae048a9a8..6f0a37f954fe 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c @@ -80,7 +80,7 @@ static u64 _dpu_core_perf_calc_clk(const struct dpu_perf_cfg *perf_cfg, mode = &state->adjusted_mode; - crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode); + crtc_clk = (u64)mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode); drm_atomic_crtc_for_each_plane(plane, crtc) { pstate = to_dpu_plane_state(plane->state); @@ -140,6 +140,12 @@ static void _dpu_core_perf_calc_crtc(const struct dpu_core_perf *core_perf, perf->max_per_pipe_ib, perf->bw_ctl); } +/** + * dpu_core_perf_crtc_check - validate performance of the given crtc state + * @crtc: Pointer to crtc + * @state: Pointer to new crtc state + * return: zero if success, or error code otherwise + */ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { @@ -301,6 +307,12 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms) return clk_rate; } +/** + * dpu_core_perf_crtc_update - update performance of the given crtc + * @crtc: Pointer to crtc + * @params_changed: true if crtc parameters are modified + * return: zero if success, or error code otherwise + */ int dpu_core_perf_crtc_update(struct drm_crtc *crtc, int params_changed) { @@ -446,6 +458,11 @@ static const struct file_operations dpu_core_perf_mode_fops = { .write = _dpu_core_perf_mode_write, }; +/** + * dpu_core_perf_debugfs_init - initialize debugfs for core performance context + * @dpu_kms: Pointer to the dpu_kms struct + * @parent: Pointer to parent debugfs + */ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent) { struct dpu_core_perf *perf = &dpu_kms->perf; @@ -482,6 +499,12 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent) } #endif +/** + * dpu_core_perf_init - initialize the given core performance context + * @perf: Pointer to core performance context + * @perf_cfg: Pointer to platform performance configuration + * @max_core_clk_rate: Maximum core clock rate + */ int dpu_core_perf_init(struct dpu_core_perf *perf, const struct dpu_perf_cfg *perf_cfg, unsigned long max_core_clk_rate) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h index 4186977390bd..451bf8021114 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h @@ -54,47 +54,20 @@ struct dpu_core_perf { u64 fix_core_ab_vote; }; -/** - * dpu_core_perf_crtc_check - validate performance of the given crtc state - * @crtc: Pointer to crtc - * @state: Pointer to new crtc state - * return: zero if success, or error code otherwise - */ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, struct drm_crtc_state *state); -/** - * dpu_core_perf_crtc_update - update performance of the given crtc - * @crtc: Pointer to crtc - * @params_changed: true if crtc parameters are modified - * return: zero if success, or error code otherwise - */ int dpu_core_perf_crtc_update(struct drm_crtc *crtc, int params_changed); -/** - * 
dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc - * @crtc: Pointer to crtc - */ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc); -/** - * dpu_core_perf_init - initialize the given core performance context - * @perf: Pointer to core performance context - * @perf_cfg: Pointer to platform performance configuration - * @max_core_clk_rate: Maximum core clock rate - */ int dpu_core_perf_init(struct dpu_core_perf *perf, const struct dpu_perf_cfg *perf_cfg, unsigned long max_core_clk_rate); struct dpu_kms; -/** - * dpu_core_perf_debugfs_init - initialize debugfs for core performance context - * @dpu_kms: Pointer to the dpu_kms struct - * @debugfs_parent: Pointer to parent debugfs - */ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent); #endif /* _DPU_CORE_PERF_H_ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 4c1be2f0555f..9f6ffd344693 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -572,6 +572,10 @@ static void _dpu_crtc_complete_flip(struct drm_crtc *crtc) spin_unlock_irqrestore(&dev->event_lock, flags); } +/** + * dpu_crtc_get_intf_mode - get interface mode of the given crtc + * @crtc: Pointer to crtc + */ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) { struct drm_encoder *encoder; @@ -594,6 +598,10 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) return INTF_MODE_NONE; } +/** + * dpu_crtc_vblank_callback - called on vblank irq, issues completion events + * @crtc: Pointer to drm crtc object + */ void dpu_crtc_vblank_callback(struct drm_crtc *crtc) { struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); @@ -704,6 +712,10 @@ void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event) kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work); } +/** + * dpu_crtc_complete_commit - callback signalling completion of current commit + * @crtc: Pointer to drm crtc object + */ void dpu_crtc_complete_commit(struct drm_crtc *crtc) { trace_dpu_crtc_complete_commit(DRMID(crtc)); @@ -711,12 +723,13 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc) _dpu_crtc_complete_flip(crtc); } -static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, +static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); struct drm_display_mode *adj_mode = &state->adjusted_mode; u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers; + struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); int i; for (i = 0; i < cstate->num_mixers; i++) { @@ -727,7 +740,12 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, r->y2 = adj_mode->vdisplay; trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r); + + if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width) + return -E2BIG; } + + return 0; } static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state, @@ -803,7 +821,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id); - _dpu_crtc_setup_lm_bounds(crtc, crtc->state); + _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc->state); /* encoder will trigger pending mask now */ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) @@ -928,6 +946,10 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc) return rc; } +/** + * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc + * @crtc: Pointer to drm crtc 
object + */ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc) { struct drm_encoder *encoder; @@ -1091,9 +1113,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc, dpu_core_perf_crtc_update(crtc, 0); - memset(cstate->mixers, 0, sizeof(cstate->mixers)); - cstate->num_mixers = 0; - /* disable clk & bw control until clk & bw properties are set */ cstate->bw_control = false; cstate->bw_split_vote = false; @@ -1192,8 +1211,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, if (crtc_state->active_changed) crtc_state->mode_changed = true; - if (cstate->num_mixers) - _dpu_crtc_setup_lm_bounds(crtc, crtc_state); + if (cstate->num_mixers) { + rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state); + if (rc) + return rc; + } /* FIXME: move this to dpu_plane_atomic_check? */ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { @@ -1224,6 +1246,24 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, return 0; } +static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); + + /* + * max crtc width is equal to the max mixer width * 2 and max height is 4K + */ + return drm_mode_validate_size(mode, + 2 * dpu_kms->catalog->caps->max_mixer_width, + 4096); +} + +/** + * dpu_crtc_vblank - enable or disable vblanks for this crtc + * @crtc: Pointer to drm crtc object + * @en: true to enable vblanks, false to disable + */ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en) { struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); @@ -1439,10 +1479,19 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = { .atomic_check = dpu_crtc_atomic_check, .atomic_begin = dpu_crtc_atomic_begin, .atomic_flush = dpu_crtc_atomic_flush, + .mode_valid = dpu_crtc_mode_valid, .get_scanout_position = dpu_crtc_get_scanout_position, }; -/* initialize crtc */ +/** + * dpu_crtc_init - create a new crtc object + * @dev: dpu device + * @plane: base plane + * @cursor: cursor plane + * @return: new crtc object or error + * + * initialize CRTC + */ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, struct drm_plane *cursor) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h index febc3e764a63..0b148f3ce0d7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h @@ -239,55 +239,17 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc) return crtc ? 
atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL; } -/** - * dpu_crtc_vblank - enable or disable vblanks for this crtc - * @crtc: Pointer to drm crtc object - * @en: true to enable vblanks, false to disable - */ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en); -/** - * dpu_crtc_vblank_callback - called on vblank irq, issues completion events - * @crtc: Pointer to drm crtc object - */ void dpu_crtc_vblank_callback(struct drm_crtc *crtc); -/** - * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc - * @crtc: Pointer to drm crtc object - */ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc); -/** - * dpu_crtc_complete_commit - callback signalling completion of current commit - * @crtc: Pointer to drm crtc object - */ void dpu_crtc_complete_commit(struct drm_crtc *crtc); -/** - * dpu_crtc_init - create a new crtc object - * @dev: dpu device - * @plane: base plane - * @cursor: cursor plane - * @Return: new crtc object or error - */ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, struct drm_plane *cursor); -/** - * dpu_crtc_register_custom_event - api for enabling/disabling crtc event - * @kms: Pointer to dpu_kms - * @crtc_drm: Pointer to crtc object - * @event: Event that client is interested - * @en: Flag to enable/disable the event - */ -int dpu_crtc_register_custom_event(struct dpu_kms *kms, - struct drm_crtc *crtc_drm, u32 event, bool en); - -/** - * dpu_crtc_get_intf_mode - get interface mode of the given crtc - * @crtc: Pointert to crtc - */ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc); /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 3b171bf227d1..83de7564e2c1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -217,6 +217,10 @@ static u32 dither_matrix[DITHER_MATRIX_SZ] = { 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10 }; +/** + * dpu_encoder_get_drm_fmt - return DRM fourcc format + * @phys_enc: Pointer to physical encoder structure + */ u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc) { struct drm_encoder *drm_enc; @@ -235,6 +239,11 @@ u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc) return DRM_FORMAT_RGB888; } +/** + * dpu_encoder_needs_periph_flush - return true if physical encoder requires + * peripheral flush + * @phys_enc: Pointer to physical encoder structure + */ bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc) { struct drm_encoder *drm_enc; @@ -253,6 +262,10 @@ bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc) msm_dp_needs_periph_flush(priv->dp[disp_info->h_tile_instance[0]], mode); } +/** + * dpu_encoder_is_widebus_enabled - return bool value if widebus is enabled + * @drm_enc: Pointer to previously created drm encoder structure + */ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc) { const struct dpu_encoder_virt *dpu_enc; @@ -272,6 +285,11 @@ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc) return false; } +/** + * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled + * for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + */ bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc) { const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -279,6 +297,12 @@ bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc) return dpu_enc->dsc ? 
true : false; } +/** + * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained + * in virtual encoder that can collect CRC values + * @drm_enc: Pointer to previously created drm encoder structure + * Returns: Number of physical encoders for given drm encoder + */ int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -297,6 +321,10 @@ int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc) return num_intf; } +/** + * dpu_encoder_setup_misr - enable misr calculations + * @drm_enc: Pointer to previously created drm encoder structure + */ void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -315,6 +343,13 @@ void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc) } } +/** + * dpu_encoder_get_crc - get the crc value from interface blocks + * @drm_enc: Pointer to previously created drm encoder structure + * @crcs: array to fill with CRC data + * @pos: offset into the @crcs array + * Returns: 0 on success, error otherwise + */ int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos) { struct dpu_encoder_virt *dpu_enc; @@ -385,6 +420,12 @@ static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode) } } +/** + * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has + * timed out, including reporting frame error event to crtc and debug dump + * @phys_enc: Pointer to physical encoder structure + * @intr_idx: Failing interrupt index + */ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, enum dpu_intr_idx intr_idx) { @@ -402,6 +443,15 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id, u32 irq_idx, struct dpu_encoder_wait_info *info); +/** + * dpu_encoder_helper_wait_for_irq - utility to wait on an irq. + * note: will call @func on timeout + * @phys_enc: Pointer to physical encoder structure + * @irq_idx: IRQ index + * @func: IRQ callback to be called in case of timeout + * @wait_info: wait info struct + * @return: 0 or -ERROR + */ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, unsigned int irq_idx, void (*func)(void *arg), @@ -473,6 +523,10 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, return ret; } +/** + * dpu_encoder_get_vsync_count - get vsync count for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + */ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -480,6 +534,10 @@ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc) return phys ? atomic_read(&phys->vsync_cnt) : 0; } +/** + * dpu_encoder_get_linecount - get interface line count for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + */ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -495,6 +553,13 @@ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc) return linecount; } +/** + * dpu_encoder_helper_split_config - split display configuration helper function + * This helper function may be used by physical encoders to configure + * the split display related registers. 
+ * @phys_enc: Pointer to physical encoder structure + * @interface: enum dpu_intf setting + */ void dpu_encoder_helper_split_config( struct dpu_encoder_phys *phys_enc, enum dpu_intf interface) @@ -544,6 +609,10 @@ void dpu_encoder_helper_split_config( } } +/** + * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology. + * @drm_enc: Pointer to previously created drm encoder structure + */ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -560,6 +629,12 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc) return (num_dsc > 0) && (num_dsc > intf_count); } +/** + * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder + * This helper function is used by physical encoder to get DSC config + * used for this encoder. + * @drm_enc: Pointer to encoder structure + */ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc) { struct msm_drm_private *priv = drm_enc->dev->dev_private; @@ -624,6 +699,40 @@ static struct msm_display_topology dpu_encoder_get_topology( return topology; } +static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms, + struct drm_encoder *drm_enc, + struct dpu_global_state *global_state, + struct drm_crtc_state *crtc_state) +{ + struct dpu_crtc_state *cstate; + struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC]; + int num_lm, num_ctl, num_dspp, i; + + cstate = to_dpu_crtc_state(crtc_state); + + memset(cstate->mixers, 0, sizeof(cstate->mixers)); + + num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); + num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); + num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp, + ARRAY_SIZE(hw_dspp)); + + for (i = 0; i < num_lm; i++) { + int ctl_idx = (i < num_ctl) ? i : (num_ctl-1); + + cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); + cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); + cstate->mixers[i].hw_dspp = i < num_dspp ? to_dpu_hw_dspp(hw_dspp[i]) : NULL; + } + + cstate->num_mixers = num_lm; +} + static int dpu_encoder_virt_atomic_check( struct drm_encoder *drm_enc, struct drm_crtc_state *crtc_state, @@ -692,6 +801,9 @@ static int dpu_encoder_virt_atomic_check( if (!crtc_state->active_changed || crtc_state->enable) ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc, crtc_state, topology); + if (!ret) + dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc, + global_state, crtc_state); } trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags); @@ -1052,6 +1164,11 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, return 0; } +/** + * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + * @job: Pointer to the current drm writeback job + */ void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, struct drm_writeback_job *job) { @@ -1069,6 +1186,11 @@ void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, } } +/** + * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder. 
+ * @drm_enc: Pointer to previously created drm encoder structure + * @job: Pointer to the current drm writeback job + */ void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc, struct drm_writeback_job *job) { @@ -1093,14 +1215,11 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, struct dpu_encoder_virt *dpu_enc; struct msm_drm_private *priv; struct dpu_kms *dpu_kms; - struct dpu_crtc_state *cstate; struct dpu_global_state *global_state; struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC]; struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; - struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; - struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL }; struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC]; - int num_lm, num_ctl, num_pp, num_dsc; + int num_ctl, num_pp, num_dsc; unsigned int dsc_mask = 0; int i; @@ -1129,11 +1248,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, ARRAY_SIZE(hw_pp)); num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); - num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, - drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); - dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, - drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp, - ARRAY_SIZE(hw_dspp)); for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i]) @@ -1159,36 +1273,23 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL; } - cstate = to_dpu_crtc_state(crtc_state); - - for (i = 0; i < num_lm; i++) { - int ctl_idx = (i < num_ctl) ? i : (num_ctl-1); - - cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); - cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); - cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]); - } - - cstate->num_mixers = num_lm; - for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - if (!dpu_enc->hw_pp[i]) { + phys->hw_pp = dpu_enc->hw_pp[i]; + if (!phys->hw_pp) { DPU_ERROR_ENC(dpu_enc, "no pp block assigned at idx: %d\n", i); return; } - if (!hw_ctl[i]) { + phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL; + if (!phys->hw_ctl) { DPU_ERROR_ENC(dpu_enc, "no ctl block assigned at idx: %d\n", i); return; } - phys->hw_pp = dpu_enc->hw_pp[i]; - phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]); - phys->cached_mode = crtc_state->adjusted_mode; if (phys->ops.atomic_mode_set) phys->ops.atomic_mode_set(phys, crtc_state, conn_state); @@ -1232,6 +1333,10 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) } } +/** + * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs + * @drm_enc: encoder pointer + */ void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -1373,6 +1478,12 @@ static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catal return NULL; } +/** + * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception + * @drm_enc: Pointer to drm encoder structure + * @phy_enc: Pointer to physical encoder + * Note: This is called from IRQ handler context. 
+ */ void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phy_enc) { @@ -1395,6 +1506,12 @@ void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, DPU_ATRACE_END("encoder_vblank_callback"); } +/** + * dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception + * @drm_enc: Pointer to drm encoder structure + * @phy_enc: Pointer to physical encoder + * Note: This is called from IRQ handler context. + */ void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phy_enc) { @@ -1413,6 +1530,11 @@ void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc, DPU_ATRACE_END("encoder_underrun_callback"); } +/** + * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to + * @drm_enc: encoder pointer + * @crtc: crtc pointer + */ void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -1425,6 +1547,13 @@ void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc) spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); } +/** + * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if + * the encoder is assigned to the given crtc + * @drm_enc: encoder pointer + * @crtc: crtc pointer + * @enable: true if vblank should be enabled + */ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc, bool enable) { @@ -1449,6 +1578,13 @@ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc, } } +/** + * dpu_encoder_frame_done_callback - Notify virtual encoder that this phys + * encoder has completed the last requested frame + * @drm_enc: Pointer to drm encoder structure + * @ready_phys: Pointer to physical encoder + * @event: Event to process + */ void dpu_encoder_frame_done_callback( struct drm_encoder *drm_enc, struct dpu_encoder_phys *ready_phys, u32 event) @@ -1571,6 +1707,12 @@ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys) phys->ops.trigger_start(phys); } +/** + * dpu_encoder_helper_trigger_start - control start helper function + * This helper function may be optionally specified by physical + * encoders if they require ctl_start triggering. + * @phys_enc: Pointer to physical encoder structure + */ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc) { struct dpu_hw_ctl *ctl; @@ -1692,6 +1834,11 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc) spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); } +/** + * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous + * kickoff and trigger the ctl prepare process for command mode display. + * @drm_enc: encoder pointer + */ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -1768,6 +1915,11 @@ static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc, return line_time; } +/** + * dpu_encoder_vsync_time - get the time of the next vsync + * @drm_enc: encoder pointer + * @wakeup_time: pointer to ktime_t to write the vsync time to + */ int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time) { struct drm_display_mode *mode; @@ -1914,6 +2066,13 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, dsc, dsc_common_mode, initial_lines); } +/** + * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl + * path (i.e. ctl flush and start) at next appropriate time.
+ * Immediately: if no previous commit is outstanding. + * Delayed: Block until next trigger can be issued. + * @drm_enc: encoder pointer + */ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -1950,6 +2109,10 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc); } +/** + * dpu_encoder_is_valid_for_commit - check if encoder has valid parameters for commit. + * @drm_enc: Pointer to drm encoder structure + */ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -1971,6 +2134,11 @@ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc) return true; } +/** + * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path + * (i.e. ctl flush and start) immediately. + * @drm_enc: encoder pointer + */ void dpu_encoder_kickoff(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -2069,6 +2237,10 @@ static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc) } } +/** + * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline + * @phys_enc: Pointer to physical encoder structure + */ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) { struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; @@ -2152,6 +2324,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) ctl->ops.clear_pending_flush(ctl); } +/** + * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block + * @phys_enc: Pointer to physical encoder + * @dpu_fmt: Pointer to the format description + * @output_type: HDMI/WB + */ void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc, const struct msm_format *dpu_fmt, u32 output_type) @@ -2456,6 +2634,13 @@ static const struct drm_encoder_funcs dpu_encoder_funcs = { .debugfs_init = dpu_encoder_debugfs_init, }; +/** + * dpu_encoder_init - initialize virtual encoder object + * @dev: Pointer to drm device structure + * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant + * @disp_info: Pointer to display information structure + * Returns: Pointer to newly created drm encoder + */ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, int drm_enc_mode, struct msm_display_info *disp_info) @@ -2577,6 +2762,10 @@ int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc) return ret; } +/** + * dpu_encoder_get_intf_mode - get interface mode of the given encoder + * @encoder: Pointer to drm encoder object + */ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) { struct dpu_encoder_virt *dpu_enc = NULL; @@ -2596,6 +2785,12 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) return INTF_MODE_NONE; } +/** + * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder + * This helper function is used by physical encoder to get DSC blocks mask + * used for this encoder.
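The kickoff entry points documented above are invoked back to back from the CRTC commit path. A condensed, illustrative sketch of that ordering for a single encoder; the real dpu_crtc_commit_kickoff() iterates every encoder attached to the crtc and also updates bandwidth and clocks between the two steps:

/* Sketch only: condensed commit flow for one encoder */
static void example_commit_kickoff(struct drm_encoder *encoder)
{
        if (!dpu_encoder_is_valid_for_commit(encoder)) {
                DRM_ERROR("invalid encoder state, skipping kickoff\n");
                return;
        }

        /* stage flush bits, DSC and CDM state for the next frame */
        dpu_encoder_prepare_for_kickoff(encoder);

        /* flip the double-buffered ctl registers (ctl flush and start) */
        dpu_encoder_kickoff(encoder);
}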
+ * @phys_enc: Pointer to physical encoder structure + */ unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc) { struct drm_encoder *encoder = phys_enc->parent; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h index f7465a1774aa..92b5ee390788 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h @@ -19,6 +19,8 @@ #define IDLE_TIMEOUT (66 - 16/2) +#define MAX_H_TILES_PER_DISPLAY 2 + /** * struct msm_display_info - defines display properties * @intf_type: INTF_ type @@ -36,159 +38,54 @@ struct msm_display_info { enum dpu_vsync_source vsync_source; }; -/** - * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to - * @encoder: encoder pointer - * @crtc: crtc pointer - */ void dpu_encoder_assign_crtc(struct drm_encoder *encoder, struct drm_crtc *crtc); -/** - * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if - * the encoder is assigned to the given crtc - * @encoder: encoder pointer - * @crtc: crtc pointer - * @enable: true if vblank should be enabled - */ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder, struct drm_crtc *crtc, bool enable); -/** - * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl - * path (i.e. ctl flush and start) at next appropriate time. - * Immediately: if no previous commit is outstanding. - * Delayed: Block until next trigger can be issued. - * @encoder: encoder pointer - */ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder); -/** - * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous - * kickoff and trigger the ctl prepare progress for command mode display. - * @encoder: encoder pointer - */ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder); -/** - * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path - * (i.e. ctl flush and start) immediately. - * @encoder: encoder pointer - */ void dpu_encoder_kickoff(struct drm_encoder *encoder); -/** - * dpu_encoder_wakeup_time - get the time of the next vsync - */ int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time); int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder); int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_encoder); -/* - * dpu_encoder_get_intf_mode - get interface mode of the given encoder - * @encoder: Pointer to drm encoder object - */ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder); -/** - * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs - * @encoder: encoder pointer - */ void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder); -/** - * dpu_encoder_init - initialize virtual encoder object - * @dev: Pointer to drm device structure - * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant - * @disp_info: Pointer to display information structure - * Returns: Pointer to newly created drm encoder - */ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, int drm_enc_mode, struct msm_display_info *disp_info); -/** - * dpu_encoder_set_idle_timeout - set the idle timeout for video - * and command mode encoders. - * @drm_enc: Pointer to previously created drm encoder structure - * @idle_timeout: idle timeout duration in milliseconds - */ -void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc, - u32 idle_timeout); -/** - * dpu_encoder_get_linecount - get interface line count for the encoder. 
- * @drm_enc: Pointer to previously created drm encoder structure - */ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc); -/** - * dpu_encoder_get_vsync_count - get vsync count for the encoder. - * @drm_enc: Pointer to previously created drm encoder structure - */ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc); -/** - * dpu_encoder_is_widebus_enabled - return bool value if widebus is enabled - * @drm_enc: Pointer to previously created drm encoder structure - */ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc); -/** - * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled - * for the encoder. - * @drm_enc: Pointer to previously created drm encoder structure - */ bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc); -/** - * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained - * in virtual encoder that can collect CRC values - * @drm_enc: Pointer to previously created drm encoder structure - * Returns: Number of physical encoders for given drm encoder - */ int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc); -/** - * dpu_encoder_setup_misr - enable misr calculations - * @drm_enc: Pointer to previously created drm encoder structure - */ void dpu_encoder_setup_misr(const struct drm_encoder *drm_encoder); -/** - * dpu_encoder_get_crc - get the crc value from interface blocks - * @drm_enc: Pointer to previously created drm encoder structure - * Returns: 0 on success, error otherwise - */ int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos); -/** - * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology. - * @drm_enc: Pointer to previously created drm encoder structure - */ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc); -/** - * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder. - * @drm_enc: Pointer to previously created drm encoder structure - * @job: Pointer to the current drm writeback job - */ void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, struct drm_writeback_job *job); -/** - * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder. - * @drm_enc: Pointer to previously created drm encoder structure - * @job: Pointer to the current drm writeback job - */ void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc, struct drm_writeback_job *job); -/** - * dpu_encoder_is_valid_for_commit - check if encode has valid parameters for commit. 
- * @drm_enc: Pointer to drm encoder structure - */ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc); #endif /* __DPU_ENCODER_H__ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h index e77ebe3a68da..63f09857025c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h @@ -279,37 +279,15 @@ struct dpu_encoder_wait_info { s64 timeout_ms; }; -/** - * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder - * @p: Pointer to init params structure - * Return: Error code or newly allocated encoder - */ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p); -/** - * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder - * @dev: Corresponding device for devres management - * @p: Pointer to init params structure - * Return: Error code or newly allocated encoder - */ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p); -/** - * dpu_encoder_phys_wb_init - initialize writeback encoder - * @dev: Corresponding device for devres management - * @init: Pointer to init info structure with initialization params - */ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p); -/** - * dpu_encoder_helper_trigger_start - control start helper function - * This helper function may be optionally specified by physical - * encoders if they require ctl_start triggering. - * @phys_enc: Pointer to physical encoder structure - */ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc); static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode( @@ -331,106 +309,38 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode( return BLEND_3D_NONE; } -/** - * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder - * This helper function is used by physical encoder to get DSC blocks mask - * used for this encoder. - * @phys_enc: Pointer to physical encoder structure - */ unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc); -/** - * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder - * This helper function is used by physical encoder to get DSC config - * used for this encoder. - * @drm_enc: Pointer to encoder structure - */ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc); -/** - * dpu_encoder_get_drm_fmt - return DRM fourcc format - * @phys_enc: Pointer to physical encoder structure - */ u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc); -/** - * dpu_encoder_needs_periph_flush - return true if physical encoder requires - * peripheral flush - * @phys_enc: Pointer to physical encoder structure - */ bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc); -/** - * dpu_encoder_helper_split_config - split display configuration helper function - * This helper function may be used by physical encoders to configure - * the split display related registers. 
- * @phys_enc: Pointer to physical encoder structure - * @interface: enum dpu_intf setting - */ void dpu_encoder_helper_split_config( struct dpu_encoder_phys *phys_enc, enum dpu_intf interface); -/** - * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has - * timed out, including reporting frame error event to crtc and debug dump - * @phys_enc: Pointer to physical encoder structure - * @intr_idx: Failing interrupt index - */ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, enum dpu_intr_idx intr_idx); -/** - * dpu_encoder_helper_wait_for_irq - utility to wait on an irq. - * note: will call dpu_encoder_helper_wait_for_irq on timeout - * @phys_enc: Pointer to physical encoder structure - * @irq: IRQ index - * @func: IRQ callback to be called in case of timeout - * @wait_info: wait info struct - * @Return: 0 or -ERROR - */ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, unsigned int irq, void (*func)(void *arg), struct dpu_encoder_wait_info *wait_info); -/** - * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline - * @phys_enc: Pointer to physical encoder structure - */ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc); -/** - * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block - * @phys_enc: Pointer to physical encoder - * @output_type: HDMI/WB - */ void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc, const struct msm_format *dpu_fmt, u32 output_type); -/** - * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception - * @drm_enc: Pointer to drm encoder structure - * @phys_enc: Pointer to physical encoder - * Note: This is called from IRQ handler context. - */ void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phy_enc); -/** dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception - * @drm_enc: Pointer to drm encoder structure - * @phys_enc: Pointer to physical encoder - * Note: This is called from IRQ handler context. 
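The kernel-doc being removed here now lives next to the definitions in dpu_encoder.c. For context on the vblank plumbing those comments describe, a sketch of the direction of travel: the crtc side asks each encoder to toggle vblank, and only the encoder currently assigned to that crtc reacts. The iteration shown is illustrative; the real dpu_crtc_vblank() keeps its own encoder bookkeeping:

/* Sketch only: how a crtc-level vblank request reaches the encoders */
static int example_crtc_vblank(struct drm_crtc *crtc, bool en)
{
        struct drm_encoder *enc;

        drm_for_each_encoder(enc, crtc->dev)
                /* no-op for encoders assigned to a different crtc */
                dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);

        return 0;
}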
- */ void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phy_enc); -/** dpu_encoder_frame_done_callback -- Notify virtual encoder that this phys encoder completes last request frame - * @drm_enc: Pointer to drm encoder structure - * @phys_enc: Pointer to physical encoder - * @event: Event to process - */ void dpu_encoder_frame_done_callback( struct drm_encoder *drm_enc, struct dpu_encoder_phys *ready_phys, u32 event); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 6fc31d47cd1d..e9bbccc44dad 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -720,6 +720,12 @@ static void dpu_encoder_phys_cmd_init_ops( ops->get_line_count = dpu_encoder_phys_cmd_get_line_count; } +/** + * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder + * @dev: Corresponding device for devres management + * @p: Pointer to init params structure + * Return: Error code or newly allocated encoder + */ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index ba8878d21cf0..abd6600046cb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -302,7 +302,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine( intf_cfg.stream_sel = 0; /* Don't care value for video mode */ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc); - if (phys_enc->hw_pp->merge_3d) + if (intf_cfg.mode_3d && phys_enc->hw_pp->merge_3d) intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); @@ -440,10 +440,12 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) struct dpu_hw_ctl *ctl; const struct msm_format *fmt; u32 fmt_fourcc; + u32 mode_3d; ctl = phys_enc->hw_ctl; fmt_fourcc = dpu_encoder_get_drm_fmt(phys_enc); fmt = mdp_get_format(&phys_enc->dpu_kms->base, fmt_fourcc, 0); + mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); DPU_DEBUG_VIDENC(phys_enc, "\n"); @@ -466,7 +468,8 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) goto skip_flush; ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx); - if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d) + if (mode_3d && ctl->ops.update_pending_flush_merge_3d && + phys_enc->hw_pp->merge_3d) ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx); if (ctl->ops.update_pending_flush_cdm && phys_enc->hw_cdm) @@ -743,6 +746,12 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops) ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count; } +/** + * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder + * @dev: Corresponding device for devres management + * @p: Pointer to init params structure + * Return: Error code or newly allocated encoder + */ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c index 882c717859ce..4c006ec74575 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +++ 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c @@ -166,10 +166,10 @@ static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc) /** * dpu_encoder_phys_wb_setup_fb - setup output framebuffer * @phys_enc: Pointer to physical encoder - * @fb: Pointer to output framebuffer + * @format: Format of the framebuffer */ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc, - struct drm_framebuffer *fb) + const struct msm_format *format) { struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); struct dpu_hw_wb *hw_wb; @@ -193,12 +193,12 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc, hw_wb->ops.setup_roi(hw_wb, wb_cfg); if (hw_wb->ops.setup_outformat) - hw_wb->ops.setup_outformat(hw_wb, wb_cfg); + hw_wb->ops.setup_outformat(hw_wb, wb_cfg, format); if (hw_wb->ops.setup_cdp) { const struct dpu_perf_cfg *perf = phys_enc->dpu_kms->catalog->perf; - hw_wb->ops.setup_cdp(hw_wb, wb_cfg->dest.format, + hw_wb->ops.setup_cdp(hw_wb, format, perf->cdp_cfg[DPU_PERF_CDP_USAGE_NRT].wr_enable); } @@ -275,6 +275,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc) struct dpu_hw_pingpong *hw_pp; struct dpu_hw_cdm *hw_cdm; u32 pending_flush = 0; + u32 mode_3d; if (!phys_enc) return; @@ -283,6 +284,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc) hw_pp = phys_enc->hw_pp; hw_ctl = phys_enc->hw_ctl; hw_cdm = phys_enc->hw_cdm; + mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0); @@ -294,7 +296,8 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc) if (hw_ctl->ops.update_pending_flush_wb) hw_ctl->ops.update_pending_flush_wb(hw_ctl, hw_wb->idx); - if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d) + if (mode_3d && hw_ctl->ops.update_pending_flush_merge_3d && + hw_pp && hw_pp->merge_3d) hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl, hw_pp->merge_3d->idx); @@ -318,15 +321,10 @@ static void dpu_encoder_phys_wb_setup( { struct dpu_hw_wb *hw_wb = phys_enc->hw_wb; struct drm_display_mode mode = phys_enc->cached_mode; - struct drm_framebuffer *fb = NULL; struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); - struct drm_writeback_job *wb_job; const struct msm_format *format; - const struct msm_format *dpu_fmt; - wb_job = wb_enc->wb_job; format = msm_framebuffer_format(wb_enc->wb_job->fb); - dpu_fmt = mdp_get_format(&phys_enc->dpu_kms->base, format->pixel_format, wb_job->fb->modifier); DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n", hw_wb->idx - WB_0, mode.name, @@ -338,9 +336,9 @@ static void dpu_encoder_phys_wb_setup( dpu_encoder_phys_wb_set_qos(phys_enc); - dpu_encoder_phys_wb_setup_fb(phys_enc, fb); + dpu_encoder_phys_wb_setup_fb(phys_enc, format); - dpu_encoder_helper_phys_setup_cdm(phys_enc, dpu_fmt, CDM_CDWN_OUTPUT_WB); + dpu_encoder_helper_phys_setup_cdm(phys_enc, format, CDM_CDWN_OUTPUT_WB); dpu_encoder_phys_wb_setup_ctl(phys_enc); } @@ -584,26 +582,20 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc format = msm_framebuffer_format(job->fb); - wb_cfg->dest.format = mdp_get_format(&phys_enc->dpu_kms->base, - format->pixel_format, job->fb->modifier); - if (!wb_cfg->dest.format) { - /* this error should be detected during atomic_check */ - DPU_ERROR("failed to get format %p4cc\n", &format->pixel_format); - return; - } - - ret = dpu_format_populate_layout(aspace, job->fb, &wb_cfg->dest); + ret = 
dpu_format_populate_plane_sizes(job->fb, &wb_cfg->dest); if (ret) { - DPU_DEBUG("failed to populate layout %d\n", ret); + DPU_DEBUG("failed to populate plane sizes %d\n", ret); return; } + dpu_format_populate_addrs(aspace, job->fb, &wb_cfg->dest); + wb_cfg->dest.width = job->fb->width; wb_cfg->dest.height = job->fb->height; - wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes; + wb_cfg->dest.num_planes = format->num_planes; - if ((wb_cfg->dest.format->fetch_type == MDP_PLANE_PLANAR) && - (wb_cfg->dest.format->element[0] == C1_B_Cb)) + if ((format->fetch_type == MDP_PLANE_PLANAR) && + (format->element[0] == C1_B_Cb)) swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]); DPU_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n", diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c index 6b1e9a617da3..59c9427da7dd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c @@ -13,9 +13,6 @@ #define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096 -#define DPU_MAX_IMG_WIDTH 0x3FFF -#define DPU_MAX_IMG_HEIGHT 0x3FFF - /* * struct dpu_media_color_map - maps drm format to media format * @format: DRM base pixel format @@ -93,10 +90,9 @@ static int _dpu_format_get_media_color_ubwc(const struct msm_format *fmt) return color_fmt; } -static int _dpu_format_get_plane_sizes_ubwc( +static int _dpu_format_populate_plane_sizes_ubwc( const struct msm_format *fmt, - const uint32_t width, - const uint32_t height, + struct drm_framebuffer *fb, struct dpu_hw_fmt_layout *layout) { int i; @@ -104,9 +100,8 @@ static int _dpu_format_get_plane_sizes_ubwc( bool meta = MSM_FORMAT_IS_UBWC(fmt); memset(layout, 0, sizeof(struct dpu_hw_fmt_layout)); - layout->format = fmt; - layout->width = width; - layout->height = height; + layout->width = fb->width; + layout->height = fb->height; layout->num_planes = fmt->num_planes; color = _dpu_format_get_media_color_ubwc(fmt); @@ -116,19 +111,19 @@ static int _dpu_format_get_plane_sizes_ubwc( return -EINVAL; } - if (MSM_FORMAT_IS_YUV(layout->format)) { + if (MSM_FORMAT_IS_YUV(fmt)) { uint32_t y_sclines, uv_sclines; uint32_t y_meta_scanlines = 0; uint32_t uv_meta_scanlines = 0; layout->num_planes = 2; - layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width); - y_sclines = VENUS_Y_SCANLINES(color, height); + layout->plane_pitch[0] = VENUS_Y_STRIDE(color, fb->width); + y_sclines = VENUS_Y_SCANLINES(color, fb->height); layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] * y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); - layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width); - uv_sclines = VENUS_UV_SCANLINES(color, height); + layout->plane_pitch[1] = VENUS_UV_STRIDE(color, fb->width); + uv_sclines = VENUS_UV_SCANLINES(color, fb->height); layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] * uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); @@ -136,13 +131,13 @@ goto done; layout->num_planes += 2; - layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width); - y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height); + layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, fb->width); + y_meta_scanlines = VENUS_Y_META_SCANLINES(color, fb->height); layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] * y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); - layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, fb->height); layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] * uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); @@ -151,16 +146,16 @@ static int _dpu_format_get_plane_sizes_ubwc( layout->num_planes = 1; - layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width); - rgb_scanlines = VENUS_RGB_SCANLINES(color, height); + layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, fb->width); + rgb_scanlines = VENUS_RGB_SCANLINES(color, fb->height); layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] * rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); if (!meta) goto done; layout->num_planes += 2; - layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width); - rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height); + layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, fb->width); + rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, fb->height); layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] * rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); } @@ -172,26 +167,23 @@ done: return 0; } -static int _dpu_format_get_plane_sizes_linear( +static int _dpu_format_populate_plane_sizes_linear( const struct msm_format *fmt, - const uint32_t width, - const uint32_t height, - struct dpu_hw_fmt_layout *layout, - const uint32_t *pitches) + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) { int i; memset(layout, 0, sizeof(struct dpu_hw_fmt_layout)); - layout->format = fmt; - layout->width = width; - layout->height = height; + layout->width = fb->width; + layout->height = fb->height; layout->num_planes = fmt->num_planes; /* Due to memset above, only need to set planes of interest */ if (fmt->fetch_type == MDP_PLANE_INTERLEAVED) { layout->num_planes = 1; - layout->plane_size[0] = width * height * layout->format->bpp; - layout->plane_pitch[0] = width * layout->format->bpp; + layout->plane_size[0] = fb->width * fb->height * fmt->bpp; + layout->plane_pitch[0] = fb->width * fmt->bpp; } else { uint32_t v_subsample, h_subsample; uint32_t chroma_samp; @@ -201,7 +193,7 @@ static int _dpu_format_get_plane_sizes_linear( _dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample, &h_subsample); - if (width % h_subsample || height % v_subsample) { + if (fb->width % h_subsample || fb->height % v_subsample) { DRM_ERROR("mismatch in subsample vs dimensions\n"); return -EINVAL; } @@ -209,11 +201,11 @@ static int _dpu_format_get_plane_sizes_linear( if ((fmt->pixel_format == DRM_FORMAT_NV12) && (MSM_FORMAT_IS_DX(fmt))) bpp = 2; - layout->plane_pitch[0] = width * bpp; + layout->plane_pitch[0] = fb->width * bpp; layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample; - layout->plane_size[0] = layout->plane_pitch[0] * height; + layout->plane_size[0] = layout->plane_pitch[0] * fb->height; layout->plane_size[1] = layout->plane_pitch[1] * - (height / v_subsample); + (fb->height / v_subsample); if (fmt->fetch_type == MDP_PLANE_PSEUDO_PLANAR) { layout->num_planes = 2; @@ -234,8 +226,13 @@ static int _dpu_format_get_plane_sizes_linear( * all the components based on ubwc specifications. 
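These _dpu_format_populate_plane_sizes_*() helpers now take the framebuffer directly instead of separate width/height/pitch arguments, and the old single-call dpu_format_populate_layout() is split into a size step and an address step. A sketch of the intended caller side, mirroring the writeback path earlier in this diff; the local names are illustrative:

/* Sketch only: caller side of the split layout API introduced here */
static int example_populate_layout(struct msm_gem_address_space *aspace,
                                   struct drm_framebuffer *fb,
                                   struct dpu_hw_fmt_layout *layout)
{
        int ret;

        /* step 1: derive per-plane pitches and sizes from the fb */
        ret = dpu_format_populate_plane_sizes(fb, layout);
        if (ret) {
                DRM_DEBUG("failed to populate plane sizes %d\n", ret);
                return ret;
        }

        /* step 2: fill in the per-plane addresses; now void, cannot fail */
        dpu_format_populate_addrs(aspace, fb, layout);

        return 0;
}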
*/ for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) { - if (pitches && layout->plane_pitch[i] < pitches[i]) - layout->plane_pitch[i] = pitches[i]; + if (layout->plane_pitch[i] <= fb->pitches[i]) { + layout->plane_pitch[i] = fb->pitches[i]; + } else { + DRM_DEBUG("plane %u expected pitch %u, fb %u\n", + i, layout->plane_pitch[i], fb->pitches[i]); + return -EINVAL; + } } for (i = 0; i < DPU_MAX_PLANES; i++) @@ -244,53 +241,54 @@ return 0; } -static int dpu_format_get_plane_sizes( - const struct msm_format *fmt, - const uint32_t w, - const uint32_t h, - struct dpu_hw_fmt_layout *layout, - const uint32_t *pitches) +/** + * dpu_format_populate_plane_sizes - populate non-address part of the layout based on + * fb, and format found in the fb + * @fb: framebuffer pointer + * @layout: format layout structure to populate + * + * Return: 0 on success or error code on failure + */ +int dpu_format_populate_plane_sizes( + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) { - if (!layout || !fmt) { + const struct msm_format *fmt; + + if (!layout || !fb) { DRM_ERROR("invalid pointer\n"); return -EINVAL; } - if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) { + if (fb->width > DPU_MAX_IMG_WIDTH || + fb->height > DPU_MAX_IMG_HEIGHT) { DRM_ERROR("image dimensions outside max range\n"); return -ERANGE; } + fmt = msm_framebuffer_format(fb); + if (MSM_FORMAT_IS_UBWC(fmt) || MSM_FORMAT_IS_TILE(fmt)) - return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout); + return _dpu_format_populate_plane_sizes_ubwc(fmt, fb, layout); - return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches); + return _dpu_format_populate_plane_sizes_linear(fmt, fb, layout); } -static int _dpu_format_populate_addrs_ubwc( - struct msm_gem_address_space *aspace, - struct drm_framebuffer *fb, - struct dpu_hw_fmt_layout *layout) +static void _dpu_format_populate_addrs_ubwc(struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) { + const struct msm_format *fmt; uint32_t base_addr = 0; bool meta; - if (!fb || !layout) { - DRM_ERROR("invalid pointers\n"); - return -EINVAL; - } - - if (aspace) - base_addr = msm_framebuffer_iova(fb, aspace, 0); - if (!base_addr) { - DRM_ERROR("failed to retrieve base addr\n"); - return -EFAULT; - } + base_addr = msm_framebuffer_iova(fb, aspace, 0); - meta = MSM_FORMAT_IS_UBWC(layout->format); + fmt = msm_framebuffer_format(fb); + meta = MSM_FORMAT_IS_UBWC(fmt); /* Per-format logic for verifying active planes */ - if (MSM_FORMAT_IS_YUV(layout->format)) { + if (MSM_FORMAT_IS_YUV(fmt)) { /************************************************/ /* UBWC ** */ /* buffer ** DPU PLANE */ @@ -319,7 +317,7 @@ static int _dpu_format_populate_addrs_ubwc( + layout->plane_size[2] + layout->plane_size[3]; if (!meta) - return 0; + return; /* configure Y metadata plane */ layout->plane_addr[2] = base_addr; @@ -350,119 +348,43 @@ static int _dpu_format_populate_addrs_ubwc( layout->plane_addr[1] = 0; if (!meta) - return 0; + return; layout->plane_addr[2] = base_addr; layout->plane_addr[3] = 0; } - return 0; } -static int _dpu_format_populate_addrs_linear( - struct msm_gem_address_space *aspace, - struct drm_framebuffer *fb, - struct dpu_hw_fmt_layout *layout) +static void _dpu_format_populate_addrs_linear(struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) { unsigned int i; - /* Can now check the pitches given vs pitches
expected */ - for (i = 0; i < layout->num_planes; ++i) { - if (layout->plane_pitch[i] > fb->pitches[i]) { - DRM_ERROR("plane %u expected pitch %u, fb %u\n", - i, layout->plane_pitch[i], fb->pitches[i]); - return -EINVAL; - } - } - /* Populate addresses for simple formats here */ - for (i = 0; i < layout->num_planes; ++i) { - if (aspace) - layout->plane_addr[i] = - msm_framebuffer_iova(fb, aspace, i); - if (!layout->plane_addr[i]) { - DRM_ERROR("failed to retrieve base addr\n"); - return -EFAULT; - } - } - - return 0; + for (i = 0; i < layout->num_planes; ++i) + layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i); } -int dpu_format_populate_layout( - struct msm_gem_address_space *aspace, - struct drm_framebuffer *fb, - struct dpu_hw_fmt_layout *layout) +/** + * dpu_format_populate_addrs - populate buffer addresses based on + * mmu, fb, and format found in the fb + * @aspace: address space pointer + * @fb: framebuffer pointer + * @layout: format layout structure to populate + */ +void dpu_format_populate_addrs(struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) { - int ret; - - if (!fb || !layout) { - DRM_ERROR("invalid arguments\n"); - return -EINVAL; - } + const struct msm_format *fmt; - if ((fb->width > DPU_MAX_IMG_WIDTH) || - (fb->height > DPU_MAX_IMG_HEIGHT)) { - DRM_ERROR("image dimensions outside max range\n"); - return -ERANGE; - } - - layout->format = msm_framebuffer_format(fb); - - /* Populate the plane sizes etc via get_format */ - ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height, - layout, fb->pitches); - if (ret) - return ret; + fmt = msm_framebuffer_format(fb); /* Populate the addresses given the fb */ - if (MSM_FORMAT_IS_UBWC(layout->format) || - MSM_FORMAT_IS_TILE(layout->format)) - ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout); + if (MSM_FORMAT_IS_UBWC(fmt) || + MSM_FORMAT_IS_TILE(fmt)) + _dpu_format_populate_addrs_ubwc(aspace, fb, layout); else - ret = _dpu_format_populate_addrs_linear(aspace, fb, layout); - - return ret; -} - -int dpu_format_check_modified_format( - const struct msm_kms *kms, - const struct msm_format *fmt, - const struct drm_mode_fb_cmd2 *cmd, - struct drm_gem_object **bos) -{ - const struct drm_format_info *info; - struct dpu_hw_fmt_layout layout; - uint32_t bos_total_size = 0; - int ret, i; - - if (!fmt || !cmd || !bos) { - DRM_ERROR("invalid arguments\n"); - return -EINVAL; - } - - info = drm_format_info(fmt->pixel_format); - if (!info) - return -EINVAL; - - ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height, - &layout, cmd->pitches); - if (ret) - return ret; - - for (i = 0; i < info->num_planes; i++) { - if (!bos[i]) { - DRM_ERROR("invalid handle for plane %d\n", i); - return -EINVAL; - } - if ((i == 0) || (bos[i] != bos[0])) - bos_total_size += bos[i]->size; - } - - if (bos_total_size < layout.total_size) { - DRM_ERROR("buffers total size too small %u expected %u\n", - bos_total_size, layout.total_size); - return -EINVAL; - } - - return 0; + _dpu_format_populate_addrs_linear(aspace, fb, layout); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h index 210d0ed5f0af..c6145d43aa3f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h @@ -31,35 +31,12 @@ static inline bool dpu_find_format(u32 format, const u32 *supported_formats, return false; } -/** - * dpu_format_check_modified_format - validate format and buffers for - * dpu non-standard, i.e. 
modified format - * @kms: kms driver - * @msm_fmt: pointer to the msm_fmt base pointer of an msm_format - * @cmd: fb_cmd2 structure user request - * @bos: gem buffer object list - * - * Return: error code on failure, 0 on success - */ -int dpu_format_check_modified_format( - const struct msm_kms *kms, - const struct msm_format *msm_fmt, - const struct drm_mode_fb_cmd2 *cmd, - struct drm_gem_object **bos); +void dpu_format_populate_addrs(struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout); -/** - * dpu_format_populate_layout - populate the given format layout based on - * mmu, fb, and format found in the fb - * @aspace: address space pointer - * @fb: framebuffer pointer - * @fmtl: format layout structure to populate - * - * Return: error code on failure, -EAGAIN if success but the addresses - * are the same as before or 0 if new addresses were populated - */ -int dpu_format_populate_layout( - struct msm_gem_address_space *aspace, +int dpu_format_populate_plane_sizes( struct drm_framebuffer *fb, - struct dpu_hw_fmt_layout *fmtl); + struct dpu_hw_fmt_layout *layout); #endif /*_DPU_FORMATS_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index dcb4fd85e73b..2cbf41f33cc0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -21,6 +21,16 @@ (VIG_BASE_MASK | \ BIT(DPU_SSPP_CSC_10BIT)) +#define VIG_MSM8953_MASK \ + (BIT(DPU_SSPP_QOS) |\ + BIT(DPU_SSPP_SCALER_QSEED2) |\ + BIT(DPU_SSPP_CSC)) + +#define VIG_MSM8996_MASK \ + (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_CDP) |\ + BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_SCALER_QSEED2) |\ + BIT(DPU_SSPP_CSC)) + #define VIG_MSM8998_MASK \ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE)) @@ -32,6 +42,12 @@ #define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL)) +#define DMA_MSM8953_MASK \ + (BIT(DPU_SSPP_QOS)) + +#define DMA_MSM8996_MASK \ + (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_CDP)) + #define DMA_MSM8998_MASK \ (BIT(DPU_SSPP_QOS) |\ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\ @@ -57,9 +73,19 @@ #define DMA_CURSOR_SDM845_MASK_SDMA \ (DMA_CURSOR_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2)) +#define DMA_CURSOR_MSM8996_MASK \ + (DMA_MSM8996_MASK | BIT(DPU_SSPP_CURSOR)) + #define DMA_CURSOR_MSM8998_MASK \ (DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR)) +#define RGB_MSM8953_MASK \ + (BIT(DPU_SSPP_QOS)) + +#define RGB_MSM8996_MASK \ + (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_CDP) |\ + BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_SCALER_RGB)) + #define MIXER_MSM8998_MASK \ (BIT(DPU_MIXER_SOURCESPLIT)) @@ -69,6 +95,12 @@ #define MIXER_QCM2290_MASK \ (BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA)) +#define PINGPONG_MSM8996_MASK \ + (BIT(DPU_PINGPONG_DSC)) + +#define PINGPONG_MSM8996_TE2_MASK \ + (PINGPONG_MSM8996_MASK | BIT(DPU_PINGPONG_TE2)) + #define PINGPONG_SDM845_MASK \ (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC)) @@ -115,10 +147,6 @@ #define MAX_HORZ_DECIMATION 4 #define MAX_VERT_DECIMATION 4 -#define MAX_UPSCALE_RATIO 20 -#define MAX_DOWNSCALE_RATIO 4 -#define SSPP_UNITY_SCALE 1 - #define STRCAT(X, Y) (X Y) static const uint32_t plane_formats[] = { @@ -276,8 +304,6 @@ static const u32 wb2_formats_rgb_yuv[] = { /* SSPP common configuration */ #define _VIG_SBLK(scaler_ver) \ { \ - .maxdwnscale = MAX_DOWNSCALE_RATIO, \ - .maxupscale = MAX_UPSCALE_RATIO, \ .scaler_blk = {.name = "scaler", \ .version = scaler_ver, \ .base = 0xa00, .len = 0xa0,}, \ @@ 
-285,15 +311,11 @@ static const u32 wb2_formats_rgb_yuv[] = { .base = 0x1a00, .len = 0x100,}, \ .format_list = plane_formats_yuv, \ .num_formats = ARRAY_SIZE(plane_formats_yuv), \ - .virt_format_list = plane_formats, \ - .virt_num_formats = ARRAY_SIZE(plane_formats), \ .rotation_cfg = NULL, \ } #define _VIG_SBLK_ROT(scaler_ver, rot_cfg) \ { \ - .maxdwnscale = MAX_DOWNSCALE_RATIO, \ - .maxupscale = MAX_UPSCALE_RATIO, \ .scaler_blk = {.name = "scaler", \ .version = scaler_ver, \ .base = 0xa00, .len = 0xa0,}, \ @@ -301,29 +323,40 @@ static const u32 wb2_formats_rgb_yuv[] = { .base = 0x1a00, .len = 0x100,}, \ .format_list = plane_formats_yuv, \ .num_formats = ARRAY_SIZE(plane_formats_yuv), \ - .virt_format_list = plane_formats, \ - .virt_num_formats = ARRAY_SIZE(plane_formats), \ .rotation_cfg = rot_cfg, \ } #define _VIG_SBLK_NOSCALE() \ { \ - .maxdwnscale = SSPP_UNITY_SCALE, \ - .maxupscale = SSPP_UNITY_SCALE, \ .format_list = plane_formats, \ .num_formats = ARRAY_SIZE(plane_formats), \ - .virt_format_list = plane_formats, \ - .virt_num_formats = ARRAY_SIZE(plane_formats), \ + } + +/* qseed2 is not supported, so disabled scaling */ +#define _VIG_SBLK_QSEED2() \ + { \ + .scaler_blk = {.name = "scaler", \ + /* no version for qseed2 */ \ + .base = 0x200, .len = 0xa0,}, \ + .csc_blk = {.name = "csc", \ + .base = 0x320, .len = 0x100,}, \ + .format_list = plane_formats_yuv, \ + .num_formats = ARRAY_SIZE(plane_formats_yuv), \ + .rotation_cfg = NULL, \ + } + +#define _RGB_SBLK() \ + { \ + .scaler_blk = {.name = "scaler", \ + .base = 0x200, .len = 0x28,}, \ + .format_list = plane_formats, \ + .num_formats = ARRAY_SIZE(plane_formats), \ } #define _DMA_SBLK() \ { \ - .maxdwnscale = SSPP_UNITY_SCALE, \ - .maxupscale = SSPP_UNITY_SCALE, \ .format_list = plane_formats, \ .num_formats = ARRAY_SIZE(plane_formats), \ - .virt_format_list = plane_formats, \ - .virt_num_formats = ARRAY_SIZE(plane_formats), \ } static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = { @@ -332,6 +365,9 @@ static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = { .rot_format_list = rotation_v2_formats, }; +static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed2 = + _VIG_SBLK_QSEED2(); + static const struct dpu_sspp_sub_blks dpu_vig_sblk_noscale = _VIG_SBLK_NOSCALE(); @@ -363,6 +399,8 @@ static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_2 = static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_3 = _VIG_SBLK(SSPP_SCALER_VER(3, 3)); +static const struct dpu_sspp_sub_blks dpu_rgb_sblk = _RGB_SBLK(); + static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK(); /************************************************************* @@ -427,6 +465,15 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = { /************************************************************* * PINGPONG sub blocks config *************************************************************/ +static const struct dpu_pingpong_sub_blks msm8996_pp_sblk_te = { + .te2 = {.name = "te2", .base = 0x2000, .len = 0x0, + .version = 0x1}, +}; + +static const struct dpu_pingpong_sub_blks msm8996_pp_sblk = { + /* No dither block */ +}; + static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = { .te2 = {.name = "te2", .base = 0x2000, .len = 0x0, .version = 0x1}, @@ -492,6 +539,34 @@ static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = { }, }; +static const struct dpu_vbif_cfg msm8996_vbif[] = { + { + .name = "vbif_rt", .id = VBIF_RT, + .base = 0, .len = 0x1040, + .default_ot_rd_limit = 32, + .default_ot_wr_limit = 16, + .features = 
BIT(DPU_VBIF_QOS_REMAP) | BIT(DPU_VBIF_QOS_OTLIM), + .xin_halt_timeout = 0x4000, + .qos_rp_remap_size = 0x20, + .dynamic_ot_rd_tbl = { + .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg), + .cfg = msm8998_ot_rdwr_cfg, + }, + .dynamic_ot_wr_tbl = { + .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg), + .cfg = msm8998_ot_rdwr_cfg, + }, + .qos_rt_tbl = { + .npriority_lvl = ARRAY_SIZE(msm8998_rt_pri_lvl), + .priority_lvl = msm8998_rt_pri_lvl, + }, + .qos_nrt_tbl = { + .npriority_lvl = ARRAY_SIZE(msm8998_nrt_pri_lvl), + .priority_lvl = msm8998_nrt_pri_lvl, + }, + }, +}; + static const struct dpu_vbif_cfg msm8998_vbif[] = { { .name = "vbif_rt", .id = VBIF_RT, @@ -675,6 +750,11 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = { * Hardware catalog *************************************************************/ +#include "catalog/dpu_1_7_msm8996.h" +#include "catalog/dpu_1_14_msm8937.h" +#include "catalog/dpu_1_15_msm8917.h" +#include "catalog/dpu_1_16_msm8953.h" + #include "catalog/dpu_3_0_msm8998.h" #include "catalog/dpu_3_2_sdm660.h" #include "catalog/dpu_3_3_sdm630.h" @@ -699,6 +779,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = { #include "catalog/dpu_8_0_sc8280xp.h" #include "catalog/dpu_8_1_sm8450.h" +#include "catalog/dpu_8_4_sa8775p.h" #include "catalog/dpu_9_0_sm8550.h" diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index 37e18e820a20..c701d18c3522 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -21,8 +21,8 @@ #define DPU_HW_BLK_NAME_LEN 16 -#define MAX_IMG_WIDTH 0x3fff -#define MAX_IMG_HEIGHT 0x3fff +#define DPU_MAX_IMG_WIDTH 0x3fff +#define DPU_MAX_IMG_HEIGHT 0x3fff #define CRTC_DUAL_MIXERS 2 @@ -364,21 +364,15 @@ struct dpu_caps { /** * struct dpu_sspp_sub_blks : SSPP sub-blocks * common: Pointer to common configurations shared by sub blocks - * @maxdwnscale: max downscale ratio supported(without DECIMATION) - * @maxupscale: maxupscale ratio supported * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps * @qseed_ver: qseed version * @scaler_blk: * @csc_blk: * @format_list: Pointer to list of supported formats * @num_formats: Number of supported formats - * @virt_format_list: Pointer to list of supported formats for virtual planes - * @virt_num_formats: Number of supported formats for virtual planes * @dpu_rotation_cfg: inline rotation configuration */ struct dpu_sspp_sub_blks { - u32 maxdwnscale; - u32 maxupscale; u32 max_per_pipe_bw; u32 qseed_ver; struct dpu_scaler_blk scaler_blk; @@ -386,8 +380,6 @@ struct dpu_sspp_sub_blks { const u32 *format_list; u32 num_formats; - const u32 *virt_format_list; - u32 virt_num_formats; const struct dpu_rotation_cfg *rotation_cfg; }; @@ -831,6 +823,10 @@ struct dpu_mdss_cfg { const struct dpu_format_extended *vig_formats; }; +extern const struct dpu_mdss_cfg dpu_msm8917_cfg; +extern const struct dpu_mdss_cfg dpu_msm8937_cfg; +extern const struct dpu_mdss_cfg dpu_msm8953_cfg; +extern const struct dpu_mdss_cfg dpu_msm8996_cfg; extern const struct dpu_mdss_cfg dpu_msm8998_cfg; extern const struct dpu_mdss_cfg dpu_sdm630_cfg; extern const struct dpu_mdss_cfg dpu_sdm660_cfg; @@ -850,6 +846,7 @@ extern const struct dpu_mdss_cfg dpu_sm8350_cfg; extern const struct dpu_mdss_cfg dpu_sc7280_cfg; extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg; extern const struct dpu_mdss_cfg dpu_sm8450_cfg; +extern const struct dpu_mdss_cfg dpu_sa8775p_cfg; extern const struct dpu_mdss_cfg dpu_sm8550_cfg; extern const 
struct dpu_mdss_cfg dpu_sm8650_cfg; extern const struct dpu_mdss_cfg dpu_x1e80100_cfg; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c index 55d2768a6d4d..ae1534c49ae0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c @@ -222,6 +222,14 @@ static void dpu_hw_cdm_bind_pingpong_blk(struct dpu_hw_cdm *ctx, const enum dpu_ DPU_REG_WRITE(c, CDM_MUX, mux_cfg); } +/** + * dpu_hw_cdm_init - initializes the cdm hw driver object. + * should be called once before accessing every cdm. + * @dev: DRM device handle + * @cfg: CDM catalog entry for which driver object is required + * @addr : mapped register io address of MDSS + * @mdss_rev: mdss hw core revision + */ struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev, const struct dpu_cdm_cfg *cfg, void __iomem *addr, const struct dpu_mdss_version *mdss_rev) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h index ec71c9886d75..6bb3476a05f8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h @@ -122,14 +122,6 @@ struct dpu_hw_cdm { struct dpu_hw_cdm_ops ops; }; -/** - * dpu_hw_cdm_init - initializes the cdm hw driver object. - * should be called once before accessing every cdm. - * @dev: DRM device handle - * @cdm: CDM catalog entry for which driver object is required - * @addr : mapped register io address of MDSS - * @mdss_rev: mdss hw core revision - */ struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev, const struct dpu_cdm_cfg *cdm, void __iomem *addr, const struct dpu_mdss_version *mdss_rev); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index 2e50049f2f85..4893f10d6a58 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -736,6 +736,15 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops, ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active; }; +/** + * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object. + * Should be called before accessing any ctl_path register. + * @dev: Corresponding device for devres management + * @cfg: ctl_path catalog entry for which driver object is required + * @addr: mapped register io address of MDP + * @mixer_count: Number of mixers in @mixer + * @mixer: Pointer to an array of Layer Mixers defined in the catalog + */ struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev, const struct dpu_ctl_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h index 4401fdc0f3e4..85c6c835cc87 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h @@ -294,15 +294,6 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_ctl, base); } -/** - * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object. - * Should be called before accessing any ctl_path register. 
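The dpu_hw_*_init() constructors whose kernel-doc moves in this series share one devres pattern: the returned object's lifetime is tied to @dev, so callers keep no explicit destroy path. An illustrative sketch of how a resource manager might instantiate the ctl blocks with the signature documented above; the field accesses follow dpu_kms/dpu_rm, but the function itself is hypothetical:

/* Sketch only: devres-managed construction of the ctl blocks */
static int example_init_ctls(struct dpu_kms *dpu_kms)
{
        const struct dpu_mdss_cfg *cat = dpu_kms->catalog;
        int i;

        for (i = 0; i < cat->ctl_count; i++) {
                const struct dpu_ctl_cfg *cfg = &cat->ctl[i];
                struct dpu_hw_ctl *hw;

                hw = dpu_hw_ctl_init(dpu_kms->dev, cfg, dpu_kms->mmio,
                                     cat->mixer_count, cat->mixer);
                if (IS_ERR(hw))
                        return PTR_ERR(hw);

                /* no matching destroy call: cleanup is bound to @dev */
                dpu_kms->rm.ctl_blks[cfg->id - CTL_0] = &hw->base;
        }

        return 0;
}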
- * @dev: Corresponding device for devres management - * @cfg: ctl_path catalog entry for which driver object is required - * @addr: mapped register io address of MDP - * @mixer_count: Number of mixers in @mixer - * @mixer: Pointer to an array of Layer Mixers defined in the catalog - */ struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev, const struct dpu_ctl_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c index 5e9aad1b2aa2..657200401f57 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c @@ -190,6 +190,13 @@ static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops, ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk; }; +/** + * dpu_hw_dsc_init() - Initializes the DSC hw driver object. + * @dev: Corresponding device for devres management + * @cfg: DSC catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * Return: Error code or allocated dpu_hw_dsc context + */ struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev, const struct dpu_dsc_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h index 989c88d2449b..fc171bdeca48 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h @@ -62,24 +62,10 @@ struct dpu_hw_dsc { struct dpu_hw_dsc_ops ops; }; -/** - * dpu_hw_dsc_init() - Initializes the DSC hw driver object. - * @dev: Corresponding device for devres management - * @cfg: DSC catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * Return: Error code or allocated dpu_hw_dsc context - */ struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev, const struct dpu_dsc_cfg *cfg, void __iomem *addr); -/** - * dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object - * @dev: Corresponding device for devres management - * @cfg: DSC catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * Returns: Error code or allocated dpu_hw_dsc context - */ struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev, const struct dpu_dsc_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c index ba193b0376fe..b9c433567262 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c @@ -369,6 +369,13 @@ static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops, ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk_1_2; } +/** + * dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object + * @dev: Corresponding device for devres management + * @cfg: DSC catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * Returns: Error code or allocated dpu_hw_dsc context + */ struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev, const struct dpu_dsc_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c index b1da88e2935f..829ca272873e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c @@ -70,6 +70,14 @@ static void _setup_dspp_ops(struct dpu_hw_dspp *c, c->ops.setup_pcc = dpu_setup_dspp_pcc; } +/** + * dpu_hw_dspp_init() - Initializes the DSPP hw driver object. 
+ * should be called once before accessing every DSPP. + * @dev: Corresponding device for devres management + * @cfg: DSPP catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * Return: pointer to structure or ERR_PTR + */ struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev, const struct dpu_dspp_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h index 3b435690b6cc..45c26cd49fa3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h @@ -78,14 +78,6 @@ static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_dspp, base); } -/** - * dpu_hw_dspp_init() - Initializes the DSPP hw driver object. - * should be called once before accessing every DSPP. - * @dev: Corresponding device for devres management - * @cfg: DSPP catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * Return: pointer to structure or ERR_PTR - */ struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev, const struct dpu_dspp_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c index b85881aab047..49bd77a755aa 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -237,6 +237,11 @@ static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int irq_entry->cb(irq_entry->arg); } +/** + * dpu_core_irq - core IRQ handler + * @kms: MSM KMS handle + * @return: interrupt handling status + */ irqreturn_t dpu_core_irq(struct msm_kms *kms) { struct dpu_kms *dpu_kms = to_dpu_kms(kms); @@ -442,6 +447,12 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms) wmb(); } +/** + * dpu_core_irq_read - IRQ helper function for reading IRQ status + * @dpu_kms: DPU handle + * @irq_idx: irq index + * @return: non-zero if irq detected; otherwise no irq detected + */ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, unsigned int irq_idx) { @@ -476,6 +487,12 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, return intr_status; } +/** + * dpu_hw_intr_init(): Initializes the interrupts hw object + * @dev: Corresponding device for devres management + * @addr: mapped register io address of MDP + * @m: pointer to MDSS catalog data + */ struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev, void __iomem *addr, const struct dpu_mdss_cfg *m) @@ -517,6 +534,17 @@ struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev, return intr; } +/** + * dpu_core_irq_register_callback - For registering callback function on IRQ + * interrupt + * @dpu_kms: DPU handle + * @irq_idx: irq index + * @irq_cb: IRQ callback function. + * @irq_arg: IRQ callback argument. + * @return: 0 for success registering callback, otherwise failure + * + * This function supports registration of multiple callbacks for each interrupt. + */ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, unsigned int irq_idx, void (*irq_cb)(void *arg), @@ -567,6 +595,15 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, return 0; } +/** + * dpu_core_irq_unregister_callback - For unregistering callback function on IRQ + * interrupt + * @dpu_kms: DPU handle + * @irq_idx: irq index + * @return: 0 for success registering callback, otherwise failure + * + * This function supports registration of multiple callbacks for each interrupt. 
+ */ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, unsigned int irq_idx) { @@ -628,6 +665,11 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v) DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq); +/** + * dpu_debugfs_core_irq_init - register core irq debugfs + * @dpu_kms: pointer to kms + * @parent: debugfs directory root + */ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, struct dentry *parent) { @@ -636,6 +678,11 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, } #endif +/** + * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler + * @kms: MSM KMS handle + * @return: none + */ void dpu_core_irq_preinstall(struct msm_kms *kms) { struct dpu_kms *dpu_kms = to_dpu_kms(kms); @@ -653,6 +700,11 @@ void dpu_core_irq_preinstall(struct msm_kms *kms) } } +/** + * dpu_core_irq_uninstall - uninstall core IRQ handler + * @kms: MSM KMS handle + * @return: none + */ void dpu_core_irq_uninstall(struct msm_kms *kms) { struct dpu_kms *dpu_kms = to_dpu_kms(kms); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h index 564b750a28fe..142358a105c5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h @@ -68,12 +68,6 @@ struct dpu_hw_intr { struct dpu_hw_intr_entry irq_tbl[DPU_NUM_IRQS]; }; -/** - * dpu_hw_intr_init(): Initializes the interrupts hw object - * @dev: Corresponding device for devres management - * @addr: mapped register io address of MDP - * @m: pointer to MDSS catalog data - */ struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev, void __iomem *addr, const struct dpu_mdss_cfg *m); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c index 29cb854f831a..fb1d25baa518 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c @@ -547,6 +547,14 @@ static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *intf, DPU_REG_WRITE(&intf->hw, INTF_CONFIG2, intf_cfg2); } +/** + * dpu_hw_intf_init() - Initializes the INTF driver for the passed + * interface catalog entry. + * @dev: Corresponding device for devres management + * @cfg: interface catalog entry for which driver object is required + * @addr: mapped register io address of MDP + * @mdss_rev: dpu core's major and minor versions + */ struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev, const struct dpu_intf_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h index fc23650dfbf0..114be272ac0a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h @@ -130,14 +130,6 @@ struct dpu_hw_intf { struct dpu_hw_intf_ops ops; }; -/** - * dpu_hw_intf_init() - Initializes the INTF driver for the passed - * interface catalog entry. 
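The IRQ API documented above is callback-per-index: a consumer registers a handler and an opaque argument for one interrupt index, and is expected to unregister it before teardown. A rough usage sketch; the completion-based handler is hypothetical, not taken from the driver:

    static void vsync_done_cb(void *arg)
    {
            struct completion *comp = arg;  /* opaque argument registered below */

            complete(comp);
    }

    ret = dpu_core_irq_register_callback(dpu_kms, irq_idx,
                                         vsync_done_cb, &comp);
    if (ret)
            return ret;
    /* ... wait for the interrupt to fire ... */
    dpu_core_irq_unregister_callback(dpu_kms, irq_idx);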
- * @dev: Corresponding device for devres management - * @cfg: interface catalog entry for which driver object is required - * @addr: mapped register io address of MDP - * @mdss_rev: dpu core's major and minor versions - */ struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev, const struct dpu_intf_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c index 1d3ccf3228c6..81b56f066519 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c @@ -158,6 +158,13 @@ static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops, ops->collect_misr = dpu_hw_lm_collect_misr; } +/** + * dpu_hw_lm_init() - Initializes the mixer hw driver object. + * should be called once before accessing every mixer. + * @dev: Corresponding device for devres management + * @cfg: mixer catalog entry for which driver object is required + * @addr: mapped register io address of MDP + */ struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev, const struct dpu_lm_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h index 0a3381755249..6f60fa9b3cd7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h @@ -93,13 +93,6 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_mixer, base); } -/** - * dpu_hw_lm_init() - Initializes the mixer hw driver object. - * should be called once before accessing every mixer. - * @dev: Corresponding device for devres management - * @cfg: mixer catalog entry for which driver object is required - * @addr: mapped register io address of MDP - */ struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev, const struct dpu_lm_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h index a2eff36a2224..f8806a4d317b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h @@ -293,7 +293,6 @@ enum dpu_3d_blend_mode { /** * struct dpu_hw_fmt_layout - format information of the source pixel data - * @format: pixel format parameters * @num_planes: number of planes (including meta data planes) * @width: image width * @height: image height @@ -303,7 +302,6 @@ enum dpu_3d_blend_mode { * @plane_pitch: pitch of each plane */ struct dpu_hw_fmt_layout { - const struct msm_format *format; uint32_t num_planes; uint32_t width; uint32_t height; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c index ddfa40a959cb..0b3325f9c870 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c @@ -39,6 +39,14 @@ static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c, c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode; }; +/** + * dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed + * merge3d catalog entry. 
+ * @dev: Corresponding device for devres management + * @cfg: Pingpong catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * Return: Error code or allocated dpu_hw_merge_3d context + */ struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev, const struct dpu_merge_3d_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h index c192f02ec1ab..6833c0207523 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h @@ -45,14 +45,6 @@ static inline struct dpu_hw_merge_3d *to_dpu_hw_merge_3d(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_merge_3d, base); } -/** - * dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed - * merge3d catalog entry. - * @dev: Corresponding device for devres management - * @cfg: Pingpong catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * Return: Error code or allocated dpu_hw_merge_3d context - */ struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev, const struct dpu_merge_3d_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c index 2db4c6fba37a..36c0ec775b92 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c @@ -283,6 +283,15 @@ static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp) return 0; } +/** + * dpu_hw_pingpong_init() - initializes the pingpong driver for the passed + * pingpong catalog entry. + * @dev: Corresponding device for devres management + * @cfg: Pingpong catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * @mdss_rev: dpu core's major and minor versions + * Return: Error code or allocated dpu_hw_pingpong context + */ struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev, const struct dpu_pingpong_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h index a48b69fd79a3..dd99e1c21a1e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h @@ -118,15 +118,6 @@ static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_pingpong, base); } -/** - * dpu_hw_pingpong_init() - initializes the pingpong driver for the passed - * pingpong catalog entry. - * @dev: Corresponding device for devres management - * @cfg: Pingpong catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * @mdss_rev: dpu core's major and minor versions - * Return: Error code or allocated dpu_hw_pingpong context - */ struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev, const struct dpu_pingpong_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c index 2c720f1fc1b2..32c7c8084553 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c @@ -672,6 +672,15 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms, } #endif +/** + * dpu_hw_sspp_init() - Initializes the sspp hw driver object. + * Should be called once before accessing every pipe. 
+ * @dev: Corresponding device for devres management + * @cfg: Pipe catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * @mdss_data: UBWC / MDSS configuration data + * @mdss_rev: dpu core's major and minor versions + */ struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev, const struct dpu_sspp_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h index 4a910b808687..56a0edf2a57c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h @@ -12,6 +12,8 @@ struct dpu_hw_sspp; +#define DPU_SSPP_MAX_PITCH_SIZE 0xffff + /** * Flags */ @@ -142,10 +144,12 @@ struct dpu_hw_pixel_ext { * @src_rect: src ROI, caller takes into account the different operations * such as decimation, flip etc to program this field * @dest_rect: destination ROI. + * @rotation: simplified drm rotation hint */ struct dpu_sw_pipe_cfg { struct drm_rect src_rect; struct drm_rect dst_rect; + unsigned int rotation; }; /** @@ -315,15 +319,7 @@ struct dpu_hw_sspp { }; struct dpu_kms; -/** - * dpu_hw_sspp_init() - Initializes the sspp hw driver object. - * Should be called once before accessing every pipe. - * @dev: Corresponding device for devres management - * @cfg: Pipe catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * @mdss_data: UBWC / MDSS configuration data - * @mdss_rev: dpu core's major and minor versions - */ + struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev, const struct dpu_sspp_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c index 0f40eea7f5e2..ad19330de61a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c @@ -284,6 +284,13 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops, ops->intf_audio_select = dpu_hw_intf_audio_select; } +/** + * dpu_hw_mdptop_init - initializes the top driver for the passed config + * @dev: Corresponding device for devres management + * @cfg: MDP TOP configuration from catalog + * @addr: Mapped register io address of MDP + * @mdss_rev: dpu core's major and minor versions + */ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev, const struct dpu_mdp_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h index f1ab9fd106e5..04efdcd21ceb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h @@ -157,18 +157,9 @@ struct dpu_hw_mdp { struct dpu_hw_mdp_ops ops; }; -/** - * dpu_hw_mdptop_init - initializes the top driver for the passed config - * @dev: Corresponding device for devres management - * @cfg: MDP TOP configuration from catalog - * @addr: Mapped register io address of MDP - * @mdss_rev: dpu core's major and minor versions - */ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev, const struct dpu_mdp_cfg *cfg, void __iomem *addr, const struct dpu_mdss_version *mdss_rev); -void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp); - #endif /*_DPU_HW_TOP_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c index 98e34afde2d2..af76ad8a8103 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c @@ -213,6 +213,13 @@ static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops, 
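The new @rotation member of struct dpu_sw_pipe_cfg is filled once at atomic-check time and read back by the pipe programming code, replacing the rotation field that used to live in dpu_plane_state. The computation mirrors the one in dpu_plane_atomic_check_sspp() later in this patch; sketched:

    unsigned int supported = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;

    if (pipe->sspp->cap->features & BIT(DPU_SSPP_INLINE_ROTATION))
            supported |= DRM_MODE_ROTATE_90;

    pipe_cfg->rotation = drm_rotation_simplify(plane_state->rotation,
                                               supported);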
ops->set_write_gather_en = dpu_hw_set_write_gather_en; } +/** + * dpu_hw_vbif_init() - Initializes the VBIF driver for the passed + * VBIF catalog entry. + * @dev: Corresponding device for devres management + * @cfg: VBIF catalog entry for which driver object is required + * @addr: Mapped register io address of MDSS + */ struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev, const struct dpu_vbif_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h index e2b4307500e4..285121ec804c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h @@ -105,13 +105,6 @@ struct dpu_hw_vbif { struct dpu_hw_vbif_ops ops; }; -/** - * dpu_hw_vbif_init() - Initializes the VBIF driver for the passed - * VBIF catalog entry. - * @dev: Corresponding device for devres management - * @cfg: VBIF catalog entry for which driver object is required - * @addr: Mapped register io address of MDSS - */ struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev, const struct dpu_vbif_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c index 93ff01c889b5..fb9f90957762 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c @@ -64,10 +64,10 @@ static void dpu_hw_wb_setup_outaddress(struct dpu_hw_wb *ctx, } static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx, - struct dpu_hw_wb_cfg *data) + struct dpu_hw_wb_cfg *data, + const struct msm_format *fmt) { struct dpu_hw_blk_reg_map *c = &ctx->hw; - const struct msm_format *fmt = data->dest.format; u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp; u32 write_config = 0; u32 opmode = 0; @@ -213,6 +213,14 @@ static void _setup_wb_ops(struct dpu_hw_wb_ops *ops, ops->setup_clk_force_ctrl = dpu_hw_wb_setup_clk_force_ctrl; } +/** + * dpu_hw_wb_init() - Initializes the writeback hw driver object. + * @dev: Corresponding device for devres management + * @cfg: wb_path catalog entry for which driver object is required + * @addr: mapped register io address of MDP + * @mdss_rev: dpu core's major and minor versions + * Return: Error code or allocated dpu_hw_wb context + */ struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev, const struct dpu_wb_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h index 37497473e16c..ee5e5ab786e1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h @@ -37,7 +37,8 @@ struct dpu_hw_wb_ops { struct dpu_hw_wb_cfg *wb); void (*setup_outformat)(struct dpu_hw_wb *ctx, - struct dpu_hw_wb_cfg *wb); + struct dpu_hw_wb_cfg *wb, + const struct msm_format *fmt); void (*setup_roi)(struct dpu_hw_wb *ctx, struct dpu_hw_wb_cfg *wb); @@ -74,14 +75,6 @@ struct dpu_hw_wb { struct dpu_hw_wb_ops ops; }; -/** - * dpu_hw_wb_init() - Initializes the writeback hw driver object. 
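Dropping the implicit data->dest.format read means setup_outformat callers now pass the format explicitly, typically straight off the framebuffer. A hedged sketch of such a call site (the job pointer is illustrative, not the driver's actual naming):

    const struct msm_format *fmt = msm_framebuffer_format(job->fb);

    if (hw_wb->ops.setup_outformat)
            hw_wb->ops.setup_outformat(hw_wb, &wb_cfg, fmt);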
- * @dev: Corresponding device for devres management - * @cfg: wb_path catalog entry for which driver object is required - * @addr: mapped register io address of MDP - * @mdss_rev: dpu core's major and minor versions - * Return: Error code or allocated dpu_hw_wb context - */ struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev, const struct dpu_wb_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 9bcae53c4f45..8b251f87a052 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -230,6 +230,21 @@ static int dpu_regset32_show(struct seq_file *s, void *data) } DEFINE_SHOW_ATTRIBUTE(dpu_regset32); +/** + * dpu_debugfs_create_regset32 - Create register read back file for debugfs + * + * This function is almost identical to the standard debugfs_create_regset32() + * function, with the main difference being that a list of register + * names/offsets do not need to be provided. The 'read' function simply outputs + * sequential register values over a specified range. + * + * @name: File name within debugfs + * @mode: File mode within debugfs + * @parent: Parent directory entry within debugfs, can be NULL + * @offset: sub-block offset + * @length: sub-block length, in bytes + * @dpu_kms: pointer to dpu kms structure + */ void dpu_debugfs_create_regset32(const char *name, umode_t mode, void *parent, uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms) @@ -1025,7 +1040,6 @@ static const struct msm_kms_funcs kms_funcs = { .complete_commit = dpu_kms_complete_commit, .enable_vblank = dpu_kms_enable_vblank, .disable_vblank = dpu_kms_disable_vblank, - .check_modified_format = dpu_format_check_modified_format, .destroy = dpu_kms_destroy, .snapshot = dpu_kms_mdp_snapshot, #ifdef CONFIG_DEBUG_FS @@ -1061,6 +1075,13 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms) return 0; } +/** + * dpu_kms_get_clk_rate() - get the clock rate + * @dpu_kms: pointer to dpu_kms structure + * @clock_name: clock name to get the rate + * + * Return: current clock rate + */ unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name) { struct clk *clk; @@ -1202,13 +1223,8 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; - /* - * max crtc width is equal to the max mixer width * 2 and max height is - * is 4K - */ - dev->mode_config.max_width = - dpu_kms->catalog->caps->max_mixer_width * 2; - dev->mode_config.max_height = 4096; + dev->mode_config.max_width = DPU_MAX_IMG_WIDTH; + dev->mode_config.max_height = DPU_MAX_IMG_HEIGHT; dev->max_vblank_count = 0xffffffff; /* Disable vblank irqs aggressively for power-saving */ @@ -1445,8 +1461,13 @@ static const struct dev_pm_ops dpu_pm_ops = { }; static const struct of_device_id dpu_dt_match[] = { + { .compatible = "qcom,msm8917-mdp5", .data = &dpu_msm8917_cfg, }, + { .compatible = "qcom,msm8937-mdp5", .data = &dpu_msm8937_cfg, }, + { .compatible = "qcom,msm8953-mdp5", .data = &dpu_msm8953_cfg, }, + { .compatible = "qcom,msm8996-mdp5", .data = &dpu_msm8996_cfg, }, { .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, }, { .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, }, + { .compatible = "qcom,sa8775p-dpu", .data = &dpu_sa8775p_cfg, }, { .compatible = "qcom,sdm630-mdp5", .data = &dpu_sdm630_cfg, }, { .compatible = "qcom,sdm660-mdp5", .data = &dpu_sdm660_cfg, }, { .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, }, @@ -1473,7 +1494,7 @@ 
MODULE_DEVICE_TABLE(of, dpu_dt_match); static struct platform_driver dpu_driver = { .probe = dpu_dev_probe, - .remove_new = dpu_dev_remove, + .remove = dpu_dev_remove, .shutdown = msm_kms_shutdown, .driver = { .name = "msm_dpu", diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index 935ff6fd172c..88d64d43ea1a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -145,38 +145,11 @@ struct dpu_global_state * @dpu_debugfs_create_regset32: Create 32-bit register dump file */ -/** - * dpu_debugfs_create_regset32 - Create register read back file for debugfs - * - * This function is almost identical to the standard debugfs_create_regset32() - * function, with the main difference being that a list of register - * names/offsets do not need to be provided. The 'read' function simply outputs - * sequential register values over a specified range. - * - * @name: File name within debugfs - * @mode: File mode within debugfs - * @parent: Parent directory entry within debugfs, can be NULL - * @offset: sub-block offset - * @length: sub-block length, in bytes - * @dpu_kms: pointer to dpu kms structure - */ void dpu_debugfs_create_regset32(const char *name, umode_t mode, void *parent, uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms); /** - * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs - * - * The return value should be passed as the 'parent' argument to subsequent - * debugfs create calls. - * - * @dpu_kms: Pointer to DPU's KMS structure - * - * Return: dentry pointer for DPU's debugfs location - */ -void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms); - -/** * DPU info management functions * These functions/definitions allow for building up a 'dpu_info' structure * containing one or more "key=value\n" entries. 
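Each new compatible hangs its per-SoC struct dpu_mdss_cfg off the of_device_id .data pointer, so probe can pick up the right catalog without SoC-specific branches. The usual lookup pattern, sketched rather than copied from the driver's actual probe:

    static int dpu_probe_sketch(struct platform_device *pdev)
    {
            const struct dpu_mdss_cfg *cfg = of_device_get_match_data(&pdev->dev);

            if (!cfg)
                    return -ENODEV;

            /* cfg now points at e.g. dpu_msm8996_cfg or dpu_sa8775p_cfg */
            return 0;
    }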
@@ -189,13 +162,6 @@ void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms); int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); -/** - * dpu_kms_get_clk_rate() - get the clock rate - * @dpu_kms: pointer to dpu_kms structure - * @clock_name: clock name to get the rate - * - * Return: current clock rate - */ unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name); #endif /* __dpu_kms_H__ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 29298e066163..3ffac24333a2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -528,8 +528,7 @@ static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe, static void _dpu_plane_setup_scaler(struct dpu_sw_pipe *pipe, const struct msm_format *fmt, bool color_fill, - struct dpu_sw_pipe_cfg *pipe_cfg, - unsigned int rotation) + struct dpu_sw_pipe_cfg *pipe_cfg) { struct dpu_hw_sspp *pipe_hw = pipe->sspp; const struct drm_format_info *info = drm_format_info(fmt->pixel_format); @@ -552,7 +551,7 @@ static void _dpu_plane_setup_scaler(struct dpu_sw_pipe *pipe, dst_height, &scaler3_cfg, fmt, info->hsub, info->vsub, - rotation); + pipe_cfg->rotation); /* configure pixel extension based on scalar config */ _dpu_plane_setup_pixel_ext(&scaler3_cfg, &pixel_ext, @@ -604,7 +603,7 @@ static void _dpu_plane_color_fill_pipe(struct dpu_plane_state *pstate, if (pipe->sspp->ops.setup_rects) pipe->sspp->ops.setup_rects(pipe, &pipe_cfg); - _dpu_plane_setup_scaler(pipe, fmt, true, &pipe_cfg, pstate->rotation); + _dpu_plane_setup_scaler(pipe, fmt, true, &pipe_cfg); } /** @@ -648,7 +647,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane, struct drm_framebuffer *fb = new_state->fb; struct dpu_plane *pdpu = to_dpu_plane(plane); struct dpu_plane_state *pstate = to_dpu_plane_state(new_state); - struct dpu_hw_fmt_layout layout; struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); int ret; @@ -676,17 +674,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane, } } - /* validate framebuffer layout before commit */ - ret = dpu_format_populate_layout(pstate->aspace, - new_state->fb, &layout); - if (ret) { - DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret); - if (pstate->aspace) - msm_framebuffer_cleanup(new_state->fb, pstate->aspace, - pstate->needs_dirtyfb); - return ret; - } - return 0; } @@ -708,12 +695,17 @@ static void dpu_plane_cleanup_fb(struct drm_plane *plane, } static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu, - const struct dpu_sspp_sub_blks *sblk, - struct drm_rect src, const struct msm_format *fmt) + struct dpu_sw_pipe *pipe, + struct drm_rect src, + const struct msm_format *fmt) { + const struct dpu_sspp_sub_blks *sblk = pipe->sspp->cap->sblk; size_t num_formats; const u32 *supported_formats; + if (!test_bit(DPU_SSPP_INLINE_ROTATION, &pipe->sspp->cap->features)) + return -EINVAL; + if (!sblk->rotation_cfg) { DPU_ERROR("invalid rotation cfg\n"); return -EINVAL; @@ -743,6 +735,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, { uint32_t min_src_size; struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); + int ret; min_src_size = MSM_FORMAT_IS_YUV(fmt) ? 
2 : 1; @@ -780,6 +773,12 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, return -EINVAL; } + if (pipe_cfg->rotation & DRM_MODE_ROTATE_90) { + ret = dpu_plane_check_inline_rotation(pdpu, pipe, pipe_cfg->src_rect, fmt); + if (ret) + return ret; + } + /* max clk check */ if (_dpu_plane_calc_clk(mode, pipe_cfg) > kms->perf.max_core_clk_rate) { DPU_DEBUG_PLANE(pdpu, "plane exceeds max mdp core clk limits\n"); @@ -789,37 +788,29 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, return 0; } -static int dpu_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) +#define MAX_UPSCALE_RATIO 20 +#define MAX_DOWNSCALE_RATIO 4 + +static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane, + struct drm_plane_state *new_plane_state, + const struct drm_crtc_state *crtc_state) { - struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, - plane); - int ret = 0, min_scale; + int i, ret = 0, min_scale, max_scale; struct dpu_plane *pdpu = to_dpu_plane(plane); struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate; struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); - struct dpu_sw_pipe *pipe = &pstate->pipe; - struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; - const struct drm_crtc_state *crtc_state = NULL; - const struct msm_format *fmt; struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; struct drm_rect fb_rect = { 0 }; uint32_t max_linewidth; - unsigned int rotation; - uint32_t supported_rotations; - const struct dpu_sspp_cfg *pipe_hw_caps = pstate->pipe.sspp->cap; - const struct dpu_sspp_sub_blks *sblk = pstate->pipe.sspp->cap->sblk; - if (new_plane_state->crtc) - crtc_state = drm_atomic_get_new_crtc_state(state, - new_plane_state->crtc); + min_scale = FRAC_16_16(1, MAX_UPSCALE_RATIO); + max_scale = MAX_DOWNSCALE_RATIO << 16; - min_scale = FRAC_16_16(1, sblk->maxupscale); ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, min_scale, - sblk->maxdwnscale << 16, + max_scale, true, true); if (ret) { DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret); @@ -828,12 +819,6 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, if (!new_plane_state->visible) return 0; - pipe->multirect_index = DPU_SSPP_RECT_SOLO; - pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; - r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; - r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; - r_pipe->sspp = NULL; - pstate->stage = DPU_STAGE_0 + pstate->base.normalized_zpos; if (pstate->stage >= pdpu->catalog->caps->max_mixer_blendstages) { DPU_ERROR("> %d plane stages assigned\n", @@ -841,13 +826,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } - pipe_cfg->src_rect = new_plane_state->src; - /* state->src is 16.16, src_rect is not */ - pipe_cfg->src_rect.x1 >>= 16; - pipe_cfg->src_rect.x2 >>= 16; - pipe_cfg->src_rect.y1 >>= 16; - pipe_cfg->src_rect.y2 >>= 16; + drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src); pipe_cfg->dst_rect = new_plane_state->dst; @@ -855,14 +835,22 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, fb_rect.y2 = new_plane_state->fb->height; /* Ensure fb size is supported */ - if (drm_rect_width(&fb_rect) > MAX_IMG_WIDTH || - drm_rect_height(&fb_rect) > MAX_IMG_HEIGHT) { + if (drm_rect_width(&fb_rect) > DPU_MAX_IMG_WIDTH || + drm_rect_height(&fb_rect) > DPU_MAX_IMG_HEIGHT) { DPU_DEBUG_PLANE(pdpu, "invalid framebuffer " 
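Two conversions above are easy to miss: the 16.16 fixed-point source rectangle is now converted with drm_rect_fp_to_int() instead of four open-coded shifts, and the scaling limits fed to drm_atomic_helper_check_plane_state() come from the new platform-wide ratios rather than the removed per-SSPP fields. Roughly:

    struct drm_rect src;
    int min_scale, max_scale;

    /* state->src is 16.16 fixed point; the hardware wants integer pixels */
    drm_rect_fp_to_int(&src, &new_plane_state->src);

    /* FRAC_16_16(1, 20) is 1/20 in 16.16, i.e. up to 20x upscale;
     * 4 << 16 permits up to 4x downscale */
    min_scale = FRAC_16_16(1, MAX_UPSCALE_RATIO);
    max_scale = MAX_DOWNSCALE_RATIO << 16;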
DRM_RECT_FMT "\n", DRM_RECT_ARG(&fb_rect)); return -E2BIG; } - fmt = msm_framebuffer_format(new_plane_state->fb); + ret = dpu_format_populate_plane_sizes(new_plane_state->fb, &pstate->layout); + if (ret) { + DPU_ERROR_PLANE(pdpu, "failed to get format plane sizes, %d\n", ret); + return ret; + } + + for (i = 0; i < pstate->layout.num_planes; i++) + if (pstate->layout.plane_pitch[i] > DPU_SSPP_MAX_PITCH_SIZE) + return -E2BIG; max_linewidth = pdpu->catalog->caps->max_linewidth; @@ -872,6 +860,86 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) || _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) { + if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) { + DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", + DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); + return -E2BIG; + } + + *r_pipe_cfg = *pipe_cfg; + pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1; + pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1; + r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2; + r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2; + } else { + memset(r_pipe_cfg, 0, sizeof(*r_pipe_cfg)); + } + + drm_rect_rotate_inv(&pipe_cfg->src_rect, + new_plane_state->fb->width, new_plane_state->fb->height, + new_plane_state->rotation); + if (r_pipe_cfg->src_rect.x1 != 0) + drm_rect_rotate_inv(&r_pipe_cfg->src_rect, + new_plane_state->fb->width, new_plane_state->fb->height, + new_plane_state->rotation); + + pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state); + + return 0; +} + +static int dpu_plane_atomic_check_sspp(struct drm_plane *plane, + struct drm_atomic_state *state, + const struct drm_crtc_state *crtc_state) +{ + struct drm_plane_state *new_plane_state = + drm_atomic_get_new_plane_state(state, plane); + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); + struct dpu_sw_pipe *pipe = &pstate->pipe; + struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; + const struct msm_format *fmt; + struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; + struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; + uint32_t max_linewidth; + uint32_t supported_rotations; + const struct dpu_sspp_cfg *pipe_hw_caps; + const struct dpu_sspp_sub_blks *sblk; + int ret = 0; + + pipe_hw_caps = pipe->sspp->cap; + sblk = pipe->sspp->cap->sblk; + + /* + * We already have verified scaling against platform limitations. + * Now check if the SSPP supports scaling at all. + */ + if (!sblk->scaler_blk.len && + ((drm_rect_width(&new_plane_state->src) >> 16 != + drm_rect_width(&new_plane_state->dst)) || + (drm_rect_height(&new_plane_state->src) >> 16 != + drm_rect_height(&new_plane_state->dst)))) + return -ERANGE; + + fmt = msm_framebuffer_format(new_plane_state->fb); + + max_linewidth = pdpu->catalog->caps->max_linewidth; + + supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0; + + if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) + supported_rotations |= DRM_MODE_ROTATE_90; + + pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation, + supported_rotations); + r_pipe_cfg->rotation = pipe_cfg->rotation; + + ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, + &crtc_state->adjusted_mode); + if (ret) + return ret; + + if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) { /* * In parallel multirect case only the half of the usual width * is supported for tiled formats. 
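The wide-plane fallback halves both rectangles at their horizontal midpoint and hands the right half to r_pipe. Worked through with concrete numbers, assuming max_linewidth = 2560 and a 5120-pixel-wide source:

    /* before: src x in [0, 5120), wider than max_linewidth */
    r_cfg = cfg;                            /* start from a full copy         */
    cfg.src_rect.x2 = (0 + 5120) >> 1;      /* left half:  src x [0, 2560)    */
    cfg.dst_rect.x2 = (0 + 5120) >> 1;
    r_cfg.src_rect.x1 = cfg.src_rect.x2;    /* right half: src x [2560, 5120) */
    r_cfg.dst_rect.x1 = cfg.dst_rect.x2;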
If we are here, we know that @@ -885,16 +953,11 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, return -E2BIG; } - if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) { - DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", - DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); - return -E2BIG; - } - if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) || drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect) || (!test_bit(DPU_SSPP_SMART_DMA_V1, &pipe->sspp->cap->features) && !test_bit(DPU_SSPP_SMART_DMA_V2, &pipe->sspp->cap->features)) || + pipe_cfg->rotation & DRM_MODE_ROTATE_90 || MSM_FORMAT_IS_YUV(fmt)) { DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, can't use split source\n", DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); @@ -912,51 +975,48 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, r_pipe->multirect_index = DPU_SSPP_RECT_1; r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL; - *r_pipe_cfg = *pipe_cfg; - pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1; - pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1; - r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2; - r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2; - } - - drm_rect_rotate_inv(&pipe_cfg->src_rect, - new_plane_state->fb->width, new_plane_state->fb->height, - new_plane_state->rotation); - if (r_pipe->sspp) - drm_rect_rotate_inv(&r_pipe_cfg->src_rect, - new_plane_state->fb->width, new_plane_state->fb->height, - new_plane_state->rotation); - - ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode); - if (ret) - return ret; - - if (r_pipe->sspp) { ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt, &crtc_state->adjusted_mode); if (ret) return ret; } - supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0; + return 0; +} - if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) - supported_rotations |= DRM_MODE_ROTATE_90; +static int dpu_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + int ret = 0; + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); + struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); + struct dpu_sw_pipe *pipe = &pstate->pipe; + struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; + const struct drm_crtc_state *crtc_state = NULL; - rotation = drm_rotation_simplify(new_plane_state->rotation, - supported_rotations); + if (new_plane_state->crtc) + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); - if ((pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) && - (rotation & DRM_MODE_ROTATE_90)) { - ret = dpu_plane_check_inline_rotation(pdpu, sblk, pipe_cfg->src_rect, fmt); - if (ret) - return ret; - } + pipe->sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe); + r_pipe->sspp = NULL; - pstate->rotation = rotation; - pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state); + ret = dpu_plane_atomic_check_nosspp(plane, new_plane_state, crtc_state); + if (ret) + return ret; - return 0; + if (!new_plane_state->visible) + return 0; + + pipe->multirect_index = DPU_SSPP_RECT_SOLO; + pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; + r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + + return dpu_plane_atomic_check_sspp(plane, 
state, crtc_state); } static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe) @@ -981,6 +1041,10 @@ static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe } +/** + * dpu_plane_flush - final plane operations before commit flush + * @plane: Pointer to drm plane structure + */ void dpu_plane_flush(struct drm_plane *plane) { struct dpu_plane *pdpu; @@ -1060,14 +1124,14 @@ static void dpu_plane_sspp_update_pipe(struct drm_plane *plane, pipe_cfg); } - _dpu_plane_setup_scaler(pipe, fmt, false, pipe_cfg, pstate->rotation); + _dpu_plane_setup_scaler(pipe, fmt, false, pipe_cfg); if (pipe->sspp->ops.setup_multirect) pipe->sspp->ops.setup_multirect( pipe); if (pipe->sspp->ops.setup_format) { - unsigned int rotation = pstate->rotation; + unsigned int rotation = pipe_cfg->rotation; src_flags = 0x0; @@ -1101,7 +1165,8 @@ static void dpu_plane_sspp_update_pipe(struct drm_plane *plane, _dpu_plane_set_qos_remap(plane, pipe); } -static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) +static void dpu_plane_sspp_atomic_update(struct drm_plane *plane, + struct drm_plane_state *new_state) { struct dpu_plane *pdpu = to_dpu_plane(plane); struct drm_plane_state *state = plane->state; @@ -1115,17 +1180,6 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) msm_framebuffer_format(fb); struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; - struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); - struct msm_gem_address_space *aspace = kms->base.aspace; - struct dpu_hw_fmt_layout layout; - bool layout_valid = false; - int ret; - - ret = dpu_format_populate_layout(aspace, fb, &layout); - if (ret) - DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret); - else - layout_valid = true; pstate->pending = true; @@ -1133,6 +1187,8 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe); pdpu->is_rt_pipe = is_rt_pipe; + dpu_format_populate_addrs(pstate->aspace, new_state->fb, &pstate->layout); + DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT ", %p4cc ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src), crtc->base.id, DRM_RECT_ARG(&state->dst), @@ -1140,12 +1196,12 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) dpu_plane_sspp_update_pipe(plane, pipe, pipe_cfg, fmt, drm_mode_vrefresh(&crtc->mode), - layout_valid ? &layout : NULL); + &pstate->layout); if (r_pipe->sspp) { dpu_plane_sspp_update_pipe(plane, r_pipe, r_pipe_cfg, fmt, drm_mode_vrefresh(&crtc->mode), - layout_valid ? &layout : NULL); + &pstate->layout); } if (pstate->needs_qos_remap) @@ -1197,7 +1253,7 @@ static void dpu_plane_atomic_update(struct drm_plane *plane, if (!new_state->visible) { _dpu_plane_atomic_disable(plane); } else { - dpu_plane_sspp_atomic_update(plane); + dpu_plane_sspp_atomic_update(plane, new_state); } } @@ -1301,7 +1357,6 @@ static void dpu_plane_reset(struct drm_plane *plane) { struct dpu_plane *pdpu; struct dpu_plane_state *pstate; - struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); if (!plane) { DPU_ERROR("invalid plane\n"); @@ -1323,16 +1378,6 @@ static void dpu_plane_reset(struct drm_plane *plane) return; } - /* - * Set the SSPP here until we have proper virtualized DPU planes. - * This is the place where the state is allocated, so fill it fully. 
- */ - pstate->pipe.sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe); - pstate->pipe.multirect_index = DPU_SSPP_RECT_SOLO; - pstate->pipe.multirect_mode = DPU_SSPP_MULTIRECT_NONE; - - pstate->r_pipe.sspp = NULL; - __drm_atomic_helper_plane_reset(plane, &pstate->base); } @@ -1388,7 +1433,15 @@ static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = { .atomic_update = dpu_plane_atomic_update, }; -/* initialize plane */ +/** + * dpu_plane_init - create new dpu plane for the given pipe + * @dev: Pointer to DRM device + * @pipe: dpu hardware pipe identifier + * @type: Plane type - PRIMARY/OVERLAY/CURSOR + * @possible_crtcs: bitmask of crtc that can be attached to the given pipe + * + * Initialize the plane. + */ struct drm_plane *dpu_plane_init(struct drm_device *dev, uint32_t pipe, enum drm_plane_type type, unsigned long possible_crtcs) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h index abd6b21a049b..97090ca7842b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h @@ -30,7 +30,7 @@ * @plane_fetch_bw: calculated BW per plane * @plane_clk: calculated clk per plane * @needs_dirtyfb: whether attached CRTC needs pixel data explicitly flushed - * @rotation: simplified drm rotation hint + * @layout: framebuffer memory layout */ struct dpu_plane_state { struct drm_plane_state base; @@ -47,46 +47,21 @@ struct dpu_plane_state { u64 plane_clk; bool needs_dirtyfb; - unsigned int rotation; + + struct dpu_hw_fmt_layout layout; }; #define to_dpu_plane_state(x) \ container_of(x, struct dpu_plane_state, base) -/** - * dpu_plane_flush - final plane operations before commit flush - * @plane: Pointer to drm plane structure - */ void dpu_plane_flush(struct drm_plane *plane); -/** - * dpu_plane_set_error: enable/disable error condition - * @plane: pointer to drm_plane structure - */ void dpu_plane_set_error(struct drm_plane *plane, bool error); -/** - * dpu_plane_init - create new dpu plane for the given pipe - * @dev: Pointer to DRM device - * @pipe: dpu hardware pipe identifier - * @type: Plane type - PRIMARY/OVERLAY/CURSOR - * @possible_crtcs: bitmask of crtc that can be attached to the given pipe - * - */ struct drm_plane *dpu_plane_init(struct drm_device *dev, uint32_t pipe, enum drm_plane_type type, unsigned long possible_crtcs); -/** - * dpu_plane_color_fill - enables color fill on plane - * @plane: Pointer to DRM plane object - * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red - * @alpha: 8-bit fill alpha value, 255 selects 100% alpha - * Returns: 0 on success - */ -int dpu_plane_color_fill(struct drm_plane *plane, - uint32_t color, uint32_t alpha); - #ifdef CONFIG_DEBUG_FS void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable); #else diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 44938ba7a2b7..c247af03dc8e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -34,6 +34,16 @@ struct dpu_rm_requirements { struct msm_display_topology topology; }; +/** + * dpu_rm_init - Read hardware catalog and create reservation tracking objects + * for all HW blocks. 
+ * @dev: Corresponding device for devres management + * @rm: DPU Resource Manager handle + * @cat: Pointer to hardware catalog + * @mdss_data: Pointer to MDSS / UBWC configuration + * @mmio: mapped register io address of MDP + * @return: 0 on Success otherwise -ERROR + */ int dpu_rm_init(struct drm_device *dev, struct dpu_rm *rm, const struct dpu_mdss_cfg *cat, @@ -641,6 +651,13 @@ static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt, } } +/** + * dpu_rm_release - Given the encoder for the display chain, release any + * HW blocks previously reserved for that use case. + * @global_state: resources shared across multiple kms objects + * @enc: DRM Encoder handle + * @return: 0 on Success otherwise -ERROR + */ void dpu_rm_release(struct dpu_global_state *global_state, struct drm_encoder *enc) { @@ -657,6 +674,20 @@ void dpu_rm_release(struct dpu_global_state *global_state, _dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id); } +/** + * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze + * the use connections and user requirements, specified through related + * topology control properties, and reserve hardware blocks to that + * display chain. + * HW blocks can then be accessed through dpu_rm_get_* functions. + * HW Reservations should be released via dpu_rm_release_hw. + * @rm: DPU Resource Manager handle + * @global_state: resources shared across multiple kms objects + * @enc: DRM Encoder handle + * @crtc_state: Proposed Atomic DRM CRTC State handle + * @topology: Pointer to topology info for the display + * @return: 0 on Success otherwise -ERROR + */ int dpu_rm_reserve( struct dpu_rm *rm, struct dpu_global_state *global_state, @@ -694,6 +725,16 @@ int dpu_rm_reserve( return ret; } +/** + * dpu_rm_get_assigned_resources - Get hw resources of the given type that are + * assigned to this encoder + * @rm: DPU Resource Manager handle + * @global_state: resources shared across multiple kms objects + * @enc_id: encoder id requesting for allocation + * @type: resource type to return data for + * @blks: pointer to the array to be filled by HW resources + * @blks_size: size of the @blks array + */ int dpu_rm_get_assigned_resources(struct dpu_rm *rm, struct dpu_global_state *global_state, uint32_t enc_id, enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size) @@ -772,6 +813,11 @@ static void dpu_rm_print_state_helper(struct drm_printer *p, } +/** + * dpu_rm_print_state - output the RM private state + * @p: DRM printer + * @global_state: global state + */ void dpu_rm_print_state(struct drm_printer *p, const struct dpu_global_state *global_state) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h index e63db8ace6b9..ea0e49cb7b0d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h @@ -38,62 +38,40 @@ struct dpu_rm { }; /** - * dpu_rm_init - Read hardware catalog and create reservation tracking objects - * for all HW blocks. 
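dpu_rm_reserve() takes the topology by value, so callers build it up front from the mode and encoder requirements. A sketch with made-up numbers for a dual-mixer, single-interface panel:

    struct msm_display_topology topology = {
            .num_lm    = 2,
            .num_intf  = 1,
            .num_dspp  = 2,
            .num_dsc   = 0,
            .needs_cdm = false,
    };

    ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc,
                         crtc_state, topology);
    if (ret)
            return ret;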
- * @dev: Corresponding device for devres management - * @rm: DPU Resource Manager handle - * @cat: Pointer to hardware catalog - * @mdss_data: Pointer to MDSS / UBWC configuration - * @mmio: mapped register io address of MDP - * @Return: 0 on Success otherwise -ERROR + * struct msm_display_topology - defines a display topology pipeline + * @num_lm: number of layer mixers used + * @num_intf: number of interfaces the panel is mounted on + * @num_dspp: number of dspp blocks used + * @num_dsc: number of Display Stream Compression (DSC) blocks used + * @needs_cdm: indicates whether cdm block is needed for this display topology */ +struct msm_display_topology { + u32 num_lm; + u32 num_intf; + u32 num_dspp; + u32 num_dsc; + bool needs_cdm; +}; + int dpu_rm_init(struct drm_device *dev, struct dpu_rm *rm, const struct dpu_mdss_cfg *cat, const struct msm_mdss_data *mdss_data, void __iomem *mmio); -/** - * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze - * the use connections and user requirements, specified through related - * topology control properties, and reserve hardware blocks to that - * display chain. - * HW blocks can then be accessed through dpu_rm_get_* functions. - * HW Reservations should be released via dpu_rm_release_hw. - * @rm: DPU Resource Manager handle - * @drm_enc: DRM Encoder handle - * @crtc_state: Proposed Atomic DRM CRTC State handle - * @topology: Pointer to topology info for the display - * @Return: 0 on Success otherwise -ERROR - */ int dpu_rm_reserve(struct dpu_rm *rm, struct dpu_global_state *global_state, struct drm_encoder *drm_enc, struct drm_crtc_state *crtc_state, struct msm_display_topology topology); -/** - * dpu_rm_reserve - Given the encoder for the display chain, release any - * HW blocks previously reserved for that use case. - * @rm: DPU Resource Manager handle - * @enc: DRM Encoder handle - * @Return: 0 on Success otherwise -ERROR - */ void dpu_rm_release(struct dpu_global_state *global_state, struct drm_encoder *enc); -/** - * Get hw resources of the given type that are assigned to this encoder. 
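After a successful reservation the blocks are fetched back by type; the helper fills the caller's array and returns the number of entries written. For example, with an arbitrarily sized scratch array:

    struct dpu_hw_blk *hw_lm[2];
    int num_lm;

    num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
                                           drm_enc->base.id, DPU_HW_BLK_LM,
                                           hw_lm, ARRAY_SIZE(hw_lm));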
- */ int dpu_rm_get_assigned_resources(struct dpu_rm *rm, struct dpu_global_state *global_state, uint32_t enc_id, enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size); -/** - * dpu_rm_print_state - output the RM private state - * @p: DRM printer - * @global_state: global state - */ void dpu_rm_print_state(struct drm_printer *p, const struct dpu_global_state *global_state); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c index 47c02b98eac3..2a551e455aa3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c @@ -204,6 +204,11 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, vbif->ops.set_halt_ctrl(vbif, params->xin_id, false); } +/** + * dpu_vbif_set_qos_remap - set QoS priority level remap + * @dpu_kms: DPU handler + * @params: Pointer to QoS configuration parameters + */ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, struct dpu_vbif_set_qos_params *params) { @@ -245,6 +250,10 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, } } +/** + * dpu_vbif_clear_errors - clear any vbif errors + * @dpu_kms: DPU handler + */ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms) { struct dpu_hw_vbif *vbif; @@ -262,6 +271,10 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms) } } +/** + * dpu_vbif_init_memtypes - initialize xin memory types for vbif + * @dpu_kms: DPU handler + */ void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms) { struct dpu_hw_vbif *vbif; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h index e1b1f7f4e4be..62e47ae1e3ee 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h @@ -38,32 +38,14 @@ struct dpu_vbif_set_qos_params { bool is_rt; }; -/** - * dpu_vbif_set_ot_limit - set OT limit for vbif client - * @dpu_kms: DPU handler - * @params: Pointer to OT configuration parameters - */ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, struct dpu_vbif_set_ot_params *params); -/** - * dpu_vbif_set_qos_remap - set QoS priority level remap - * @dpu_kms: DPU handler - * @params: Pointer to QoS configuration parameters - */ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, struct dpu_vbif_set_qos_params *params); -/** - * dpu_vbif_clear_errors - clear any vbif errors - * @dpu_kms: DPU handler - */ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms); -/** - * dpu_vbif_init_memtypes - initialize xin memory types for vbif - * @dpu_kms: DPU handler - */ void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms); void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root); diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index 6e4e74f9d63d..c469e66cfc11 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -568,7 +568,7 @@ MODULE_DEVICE_TABLE(of, mdp4_dt_match); static struct platform_driver mdp4_platform_driver = { .probe = mdp4_probe, - .remove_new = mdp4_remove, + .remove = mdp4_remove, .shutdown = msm_kms_shutdown, .driver = { .name = "mdp4", diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 374704cce656..3fcca7a3d82e 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -908,7 +908,7 @@ MODULE_DEVICE_TABLE(of, mdp5_dt_match); static struct platform_driver mdp5_driver = { .probe = mdp5_dev_probe, - .remove_new = mdp5_dev_remove, + .remove = mdp5_dev_remove, 
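The .remove_new to .remove renames here are mechanical: .remove_new was a transitional name for the void-returning remove callback, and once struct platform_driver's .remove itself was converted to return void, drivers moved back to the plain name. The resulting shape, using this file's own callbacks:

    static struct platform_driver mdp5_driver = {
            .probe  = mdp5_dev_probe,
            .remove = mdp5_dev_remove,  /* void (*remove)(struct platform_device *) */
    };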
.shutdown = msm_kms_shutdown, .driver = { .name = "msm_mdp", diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c index add72bbc28b1..07a2c1e87219 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c @@ -25,43 +25,41 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b addr = base_addr; end_addr = base_addr + aligned_len; - if (!(*reg)) - *reg = kzalloc(len_padded, GFP_KERNEL); - - if (*reg) - dump_addr = *reg; + *reg = kvzalloc(len_padded, GFP_KERNEL); + if (!*reg) + return; + dump_addr = *reg; for (i = 0; i < num_rows; i++) { x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0; x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0; x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0; xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0; - if (dump_addr) { - dump_addr[i * 4] = x0; - dump_addr[i * 4 + 1] = x4; - dump_addr[i * 4 + 2] = x8; - dump_addr[i * 4 + 3] = xc; - } + dump_addr[i * 4] = x0; + dump_addr[i * 4 + 1] = x4; + dump_addr[i * 4 + 2] = x8; + dump_addr[i * 4 + 3] = xc; addr += REG_DUMP_ALIGN; } } -static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr, - struct drm_printer *p) +static void msm_disp_state_print_regs(const u32 *dump_addr, u32 len, + void __iomem *base_addr, struct drm_printer *p) { int i; - u32 *dump_addr = NULL; void __iomem *addr; u32 num_rows; + if (!dump_addr) { + drm_printf(p, "Registers not stored\n"); + return; + } + addr = base_addr; num_rows = len / REG_DUMP_ALIGN; - if (*reg) - dump_addr = *reg; - for (i = 0; i < num_rows; i++) { drm_printf(p, "0x%lx : %08x %08x %08x %08x\n", (unsigned long)(addr - base_addr), @@ -89,7 +87,7 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p) list_for_each_entry_safe(block, tmp, &state->blocks, node) { drm_printf(p, "====================%s================\n", block->name); - msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p); + msm_disp_state_print_regs(block->state, block->size, block->base_addr, p); } drm_printf(p, "===================dpu drm state================\n"); @@ -161,7 +159,7 @@ void msm_disp_state_free(void *data) list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) { list_del(&block->node); - kfree(block->state); + kvfree(block->state); kfree(block); } diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c index a599fc5d63c5..74e01a5dd419 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.c +++ b/drivers/gpu/drm/msm/dp/dp_audio.c @@ -17,281 +17,281 @@ #include "dp_display.h" #include "dp_utils.h" -struct dp_audio_private { +struct msm_dp_audio_private { struct platform_device *audio_pdev; struct platform_device *pdev; struct drm_device *drm_dev; - struct dp_catalog *catalog; + struct msm_dp_catalog *catalog; u32 channels; - struct dp_audio dp_audio; + struct msm_dp_audio msm_dp_audio; }; -static u32 dp_audio_get_header(struct dp_catalog *catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header) +static u32 msm_dp_audio_get_header(struct msm_dp_catalog *catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header) { - return dp_catalog_audio_get_header(catalog, sdp, header); + return msm_dp_catalog_audio_get_header(catalog, sdp, header); } -static void dp_audio_set_header(struct dp_catalog *catalog, +static void msm_dp_audio_set_header(struct 
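The msm_disp_snapshot_util.c hunk above also changes the allocator: register dumps can be large, so the buffer now comes from kvzalloc(), which falls back from kmalloc to vmalloc for big allocations, and msm_disp_state_free() correspondingly switches to kvfree(). The pairing rule, as a short sketch with illustrative names:

#include <linux/slab.h>

static u32 *snapshot_alloc(size_t len_padded)
{
	/* kmalloc first, transparent vmalloc fallback for big buffers */
	return kvzalloc(len_padded, GFP_KERNEL);
}

static void snapshot_free(u32 *buf)
{
	/* kvfree() copes with both kmalloc- and vmalloc-backed memory */
	kvfree(buf);
}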
msm_dp_catalog *catalog, u32 data, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header) + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header) { - dp_catalog_audio_set_header(catalog, sdp, header, data); + msm_dp_catalog_audio_set_header(catalog, sdp, header, data); } -static void dp_audio_stream_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_stream_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); new_value = 0x02; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); new_value = value; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); /* Config header and parity byte 3 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); new_value = audio->channels - 1; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_3_BIT) | (parity_byte << PARITY_BYTE_3_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); } -static void dp_audio_timestamp_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_timestamp_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); new_value = 0x1; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); new_value = 0x17; - parity_byte = 
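Every SDP helper in this file repeats one pattern: read the current header word from the catalog, compute a parity byte over the new header byte, then OR both into their bit positions. A hedged sketch of that packing step follows; the stand-in parity below is plain even parity, whereas the driver's real routine, msm_dp_utils_calculate_parity(), implements the DisplayPort SDP header parity rules:

#include <linux/bitops.h>
#include <linux/types.h>

/* Stand-in only, NOT the DP-spec parity computed in dp_utils.c */
static u8 example_parity(u8 byte)
{
	return hweight8(byte) & 1;
}

static u32 example_pack_header(u32 value, u8 new_value,
			       unsigned int byte_shift,
			       unsigned int parity_shift)
{
	value |= (u32)new_value << byte_shift;
	value |= (u32)example_parity(new_value) << parity_shift;
	return value;
}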
dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); /* Config header and parity byte 3 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); new_value = (0x0 | (0x11 << 2)); - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_3_BIT) | (parity_byte << PARITY_BYTE_3_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); } -static void dp_audio_infoframe_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_infoframe_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); new_value = 0x84; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); new_value = 0x1b; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); /* Config header and parity byte 3 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); new_value = (0x0 | (0x11 << 2)); - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_3_BIT) | (parity_byte << PARITY_BYTE_3_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", new_value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); } -static void dp_audio_copy_management_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_copy_management_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = 
msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); new_value = 0x05; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); new_value = 0x0F; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); /* Config header and parity byte 3 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); new_value = 0x0; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_3_BIT) | (parity_byte << PARITY_BYTE_3_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); } -static void dp_audio_isrc_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_isrc_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); new_value = 0x06; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); new_value = 0x0F; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); } -static void dp_audio_setup_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_setup_sdp(struct msm_dp_audio_private *audio) { - dp_catalog_audio_config_sdp(audio->catalog); + msm_dp_catalog_audio_config_sdp(audio->catalog); - dp_audio_stream_sdp(audio); - dp_audio_timestamp_sdp(audio); - 
dp_audio_infoframe_sdp(audio); - dp_audio_copy_management_sdp(audio); - dp_audio_isrc_sdp(audio); + msm_dp_audio_stream_sdp(audio); + msm_dp_audio_timestamp_sdp(audio); + msm_dp_audio_infoframe_sdp(audio); + msm_dp_audio_copy_management_sdp(audio); + msm_dp_audio_isrc_sdp(audio); } -static void dp_audio_setup_acr(struct dp_audio_private *audio) +static void msm_dp_audio_setup_acr(struct msm_dp_audio_private *audio) { u32 select = 0; - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; - switch (audio->dp_audio.bw_code) { + switch (audio->msm_dp_audio.bw_code) { case DP_LINK_BW_1_62: select = 0; break; @@ -310,15 +310,15 @@ static void dp_audio_setup_acr(struct dp_audio_private *audio) break; } - dp_catalog_audio_config_acr(catalog, select); + msm_dp_catalog_audio_config_acr(catalog, select); } -static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio) +static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 safe_to_exit_level = 0; - switch (audio->dp_audio.lane_count) { + switch (audio->msm_dp_audio.lane_count) { case 1: safe_to_exit_level = 14; break; @@ -336,49 +336,49 @@ static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio) break; } - dp_catalog_audio_sfe_level(catalog, safe_to_exit_level); + msm_dp_catalog_audio_sfe_level(catalog, safe_to_exit_level); } -static void dp_audio_enable(struct dp_audio_private *audio, bool enable) +static void msm_dp_audio_enable(struct msm_dp_audio_private *audio, bool enable) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; - dp_catalog_audio_enable(catalog, enable); + msm_dp_catalog_audio_enable(catalog, enable); } -static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev) +static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device *pdev) { - struct dp_audio *dp_audio; - struct msm_dp *dp_display; + struct msm_dp_audio *msm_dp_audio; + struct msm_dp *msm_dp_display; if (!pdev) { DRM_ERROR("invalid input\n"); return ERR_PTR(-ENODEV); } - dp_display = platform_get_drvdata(pdev); - if (!dp_display) { + msm_dp_display = platform_get_drvdata(pdev); + if (!msm_dp_display) { DRM_ERROR("invalid input\n"); return ERR_PTR(-ENODEV); } - dp_audio = dp_display->dp_audio; + msm_dp_audio = msm_dp_display->msm_dp_audio; - if (!dp_audio) { - DRM_ERROR("invalid dp_audio data\n"); + if (!msm_dp_audio) { + DRM_ERROR("invalid msm_dp_audio data\n"); return ERR_PTR(-EINVAL); } - return container_of(dp_audio, struct dp_audio_private, dp_audio); + return container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio); } -static int dp_audio_hook_plugged_cb(struct device *dev, void *data, +static int msm_dp_audio_hook_plugged_cb(struct device *dev, void *data, hdmi_codec_plugged_cb fn, struct device *codec_dev) { struct platform_device *pdev; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; pdev = to_platform_device(dev); if (!pdev) { @@ -386,20 +386,20 @@ static int dp_audio_hook_plugged_cb(struct device *dev, void *data, return -ENODEV; } - dp_display = platform_get_drvdata(pdev); - if (!dp_display) { + msm_dp_display = platform_get_drvdata(pdev); + if (!msm_dp_display) { pr_err("invalid input\n"); return -ENODEV; } - return dp_display_set_plugged_cb(dp_display, fn, codec_dev); + return msm_dp_display_set_plugged_cb(msm_dp_display, fn, codec_dev); } -static int 
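msm_dp_audio_get_data() above, like the dp_aux and dp_catalog code later in this series, leans on the embed-and-container_of idiom: the public struct is embedded inside a private wrapper, and container_of() recovers the wrapper from a pointer to the embedded member. A minimal sketch with hypothetical names:

#include <linux/container_of.h>
#include <linux/types.h>

struct example_public {
	u32 lane_count;
};

struct example_private {
	void *hw_state;			/* driver-private bits */
	struct example_public pub;	/* embedded public interface */
};

static struct example_private *to_example_private(struct example_public *pub)
{
	return container_of(pub, struct example_private, pub);
}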
dp_audio_get_eld(struct device *dev, +static int msm_dp_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) { struct platform_device *pdev; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; pdev = to_platform_device(dev); @@ -408,30 +408,30 @@ static int dp_audio_get_eld(struct device *dev, return -ENODEV; } - dp_display = platform_get_drvdata(pdev); - if (!dp_display) { + msm_dp_display = platform_get_drvdata(pdev); + if (!msm_dp_display) { DRM_ERROR("invalid input\n"); return -ENODEV; } - memcpy(buf, dp_display->connector->eld, - min(sizeof(dp_display->connector->eld), len)); + memcpy(buf, msm_dp_display->connector->eld, + min(sizeof(msm_dp_display->connector->eld), len)); return 0; } -int dp_audio_hw_params(struct device *dev, +int msm_dp_audio_hw_params(struct device *dev, void *data, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params) { int rc = 0; - struct dp_audio_private *audio; + struct msm_dp_audio_private *audio; struct platform_device *pdev; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; pdev = to_platform_device(dev); - dp_display = platform_get_drvdata(pdev); + msm_dp_display = platform_get_drvdata(pdev); /* * there could be cases where sound card can be opened even @@ -441,12 +441,12 @@ int dp_audio_hw_params(struct device *dev, * such cases check for connection status and bail out if not * connected. */ - if (!dp_display->power_on) { + if (!msm_dp_display->power_on) { rc = -EINVAL; goto end; } - audio = dp_audio_get_data(pdev); + audio = msm_dp_audio_get_data(pdev); if (IS_ERR(audio)) { rc = PTR_ERR(audio); goto end; @@ -454,26 +454,26 @@ int dp_audio_hw_params(struct device *dev, audio->channels = params->channels; - dp_audio_setup_sdp(audio); - dp_audio_setup_acr(audio); - dp_audio_safe_to_exit_level(audio); - dp_audio_enable(audio, true); - dp_display_signal_audio_start(dp_display); - dp_display->audio_enabled = true; + msm_dp_audio_setup_sdp(audio); + msm_dp_audio_setup_acr(audio); + msm_dp_audio_safe_to_exit_level(audio); + msm_dp_audio_enable(audio, true); + msm_dp_display_signal_audio_start(msm_dp_display); + msm_dp_display->audio_enabled = true; end: return rc; } -static void dp_audio_shutdown(struct device *dev, void *data) +static void msm_dp_audio_shutdown(struct device *dev, void *data) { - struct dp_audio_private *audio; + struct msm_dp_audio_private *audio; struct platform_device *pdev; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; pdev = to_platform_device(dev); - dp_display = platform_get_drvdata(pdev); - audio = dp_audio_get_data(pdev); + msm_dp_display = platform_get_drvdata(pdev); + audio = msm_dp_audio_get_data(pdev); if (IS_ERR(audio)) { DRM_ERROR("failed to get audio data\n"); return; @@ -487,32 +487,32 @@ static void dp_audio_shutdown(struct device *dev, void *data) * connected. 
is_connected cannot be used here as its set * to false earlier than this call */ - if (!dp_display->audio_enabled) + if (!msm_dp_display->audio_enabled) return; - dp_audio_enable(audio, false); + msm_dp_audio_enable(audio, false); /* signal the dp display to safely shutdown clocks */ - dp_display_signal_audio_complete(dp_display); + msm_dp_display_signal_audio_complete(msm_dp_display); } -static const struct hdmi_codec_ops dp_audio_codec_ops = { - .hw_params = dp_audio_hw_params, - .audio_shutdown = dp_audio_shutdown, - .get_eld = dp_audio_get_eld, - .hook_plugged_cb = dp_audio_hook_plugged_cb, +static const struct hdmi_codec_ops msm_dp_audio_codec_ops = { + .hw_params = msm_dp_audio_hw_params, + .audio_shutdown = msm_dp_audio_shutdown, + .get_eld = msm_dp_audio_get_eld, + .hook_plugged_cb = msm_dp_audio_hook_plugged_cb, }; static struct hdmi_codec_pdata codec_data = { - .ops = &dp_audio_codec_ops, + .ops = &msm_dp_audio_codec_ops, .max_i2s_channels = 8, .i2s = 1, }; -void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio) +void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio) { - struct dp_audio_private *audio_priv; + struct msm_dp_audio_private *audio_priv; - audio_priv = container_of(dp_audio, struct dp_audio_private, dp_audio); + audio_priv = container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio); if (audio_priv->audio_pdev) { platform_device_unregister(audio_priv->audio_pdev); @@ -520,13 +520,13 @@ void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio) } } -int dp_register_audio_driver(struct device *dev, - struct dp_audio *dp_audio) +int msm_dp_register_audio_driver(struct device *dev, + struct msm_dp_audio *msm_dp_audio) { - struct dp_audio_private *audio_priv; + struct msm_dp_audio_private *audio_priv; - audio_priv = container_of(dp_audio, - struct dp_audio_private, dp_audio); + audio_priv = container_of(msm_dp_audio, + struct msm_dp_audio_private, msm_dp_audio); audio_priv->audio_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, @@ -536,13 +536,13 @@ int dp_register_audio_driver(struct device *dev, return PTR_ERR_OR_ZERO(audio_priv->audio_pdev); } -struct dp_audio *dp_audio_get(struct platform_device *pdev, - struct dp_panel *panel, - struct dp_catalog *catalog) +struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev, + struct msm_dp_panel *panel, + struct msm_dp_catalog *catalog) { int rc = 0; - struct dp_audio_private *audio; - struct dp_audio *dp_audio; + struct msm_dp_audio_private *audio; + struct msm_dp_audio *msm_dp_audio; if (!pdev || !panel || !catalog) { DRM_ERROR("invalid input\n"); @@ -559,23 +559,23 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev, audio->pdev = pdev; audio->catalog = catalog; - dp_audio = &audio->dp_audio; + msm_dp_audio = &audio->msm_dp_audio; - dp_catalog_audio_init(catalog); + msm_dp_catalog_audio_init(catalog); - return dp_audio; + return msm_dp_audio; error: return ERR_PTR(rc); } -void dp_audio_put(struct dp_audio *dp_audio) +void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio) { - struct dp_audio_private *audio; + struct msm_dp_audio_private *audio; - if (!dp_audio) + if (!msm_dp_audio) return; - audio = container_of(dp_audio, struct dp_audio_private, dp_audio); + audio = container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio); devm_kfree(&audio->pdev->dev, audio); } diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h index 4ab78880af82..1c9efaaa40e5 100644 --- 
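For context on the registration being renamed here: the DP audio code exposes itself to ASoC as an hdmi-codec child device. The hdmi_codec_ops table routes hw_params, shutdown, ELD reads and the plugged callback into this file, and platform_device_register_data() instantiates the HDMI_CODEC_DRV_NAME device with that pdata, as msm_dp_register_audio_driver() does above. A condensed sketch of that registration:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <sound/hdmi-codec.h>

static int example_register_audio(struct device *dev,
				  struct hdmi_codec_pdata *pdata)
{
	struct platform_device *codec_pdev;

	codec_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
						   PLATFORM_DEVID_AUTO,
						   pdata, sizeof(*pdata));
	return PTR_ERR_OR_ZERO(codec_pdev);
}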
a/drivers/gpu/drm/msm/dp/dp_audio.h +++ b/drivers/gpu/drm/msm/dp/dp_audio.h @@ -13,58 +13,58 @@ #include <sound/hdmi-codec.h> /** - * struct dp_audio + * struct msm_dp_audio * @lane_count: number of lanes configured in current session * @bw_code: link rate's bandwidth code for current session */ -struct dp_audio { +struct msm_dp_audio { u32 lane_count; u32 bw_code; }; /** - * dp_audio_get() + * msm_dp_audio_get() * * Creates and instance of dp audio. * * @pdev: caller's platform device instance. - * @panel: an instance of dp_panel module. - * @catalog: an instance of dp_catalog module. + * @panel: an instance of msm_dp_panel module. + * @catalog: an instance of msm_dp_catalog module. * * Returns the error code in case of failure, otherwize - * an instance of newly created dp_module. + * an instance of newly created msm_dp_module. */ -struct dp_audio *dp_audio_get(struct platform_device *pdev, - struct dp_panel *panel, - struct dp_catalog *catalog); +struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev, + struct msm_dp_panel *panel, + struct msm_dp_catalog *catalog); /** - * dp_register_audio_driver() + * msm_dp_register_audio_driver() * * Registers DP device with hdmi_codec interface. * * @dev: DP device instance. - * @dp_audio: an instance of dp_audio module. + * @msm_dp_audio: an instance of msm_dp_audio module. * * * Returns the error code in case of failure, otherwise * zero on success. */ -int dp_register_audio_driver(struct device *dev, - struct dp_audio *dp_audio); +int msm_dp_register_audio_driver(struct device *dev, + struct msm_dp_audio *msm_dp_audio); -void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio); +void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio); /** - * dp_audio_put() + * msm_dp_audio_put() * - * Cleans the dp_audio instance. + * Cleans the msm_dp_audio instance. * - * @dp_audio: an instance of dp_audio. + * @msm_dp_audio: an instance of msm_dp_audio. 
*/ -void dp_audio_put(struct dp_audio *dp_audio); +void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio); -int dp_audio_hw_params(struct device *dev, +int msm_dp_audio_hw_params(struct device *dev, void *data, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params); diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index 00dfafbebe0e..bc8d46abfc61 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -20,9 +20,9 @@ enum msm_dp_aux_err { DP_AUX_ERR_PHY, }; -struct dp_aux_private { +struct msm_dp_aux_private { struct device *dev; - struct dp_catalog *catalog; + struct msm_dp_catalog *catalog; struct phy *phy; @@ -42,12 +42,12 @@ struct dp_aux_private { u32 offset; u32 segment; - struct drm_dp_aux dp_aux; + struct drm_dp_aux msm_dp_aux; }; #define MAX_AUX_RETRIES 5 -static ssize_t dp_aux_write(struct dp_aux_private *aux, +static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *msg) { u8 data[4]; @@ -88,11 +88,11 @@ static ssize_t dp_aux_write(struct dp_aux_private *aux, /* index = 0, write */ if (i == 0) reg |= DP_AUX_DATA_INDEX_WRITE; - dp_catalog_aux_write_data(aux->catalog, reg); + msm_dp_catalog_aux_write_data(aux->catalog, reg); } - dp_catalog_aux_clear_trans(aux->catalog, false); - dp_catalog_aux_clear_hw_interrupts(aux->catalog); + msm_dp_catalog_aux_clear_trans(aux->catalog, false); + msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog); reg = 0; /* Transaction number == 1 */ if (!aux->native) { /* i2c */ @@ -106,12 +106,12 @@ static ssize_t dp_aux_write(struct dp_aux_private *aux, } reg |= DP_AUX_TRANS_CTRL_GO; - dp_catalog_aux_write_trans(aux->catalog, reg); + msm_dp_catalog_aux_write_trans(aux->catalog, reg); return len; } -static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux, +static ssize_t msm_dp_aux_cmd_fifo_tx(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *msg) { ssize_t ret; @@ -119,7 +119,7 @@ static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux, reinit_completion(&aux->comp); - ret = dp_aux_write(aux, msg); + ret = msm_dp_aux_write(aux, msg); if (ret < 0) return ret; @@ -131,7 +131,7 @@ static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux, return ret; } -static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, +static ssize_t msm_dp_aux_cmd_fifo_rx(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *msg) { u32 data; @@ -139,20 +139,20 @@ static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, u32 i, actual_i; u32 len = msg->size; - dp_catalog_aux_clear_trans(aux->catalog, true); + msm_dp_catalog_aux_clear_trans(aux->catalog, true); data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */ data |= DP_AUX_DATA_READ; /* read */ - dp_catalog_aux_write_data(aux->catalog, data); + msm_dp_catalog_aux_write_data(aux->catalog, data); dp = msg->buffer; /* discard first byte */ - data = dp_catalog_aux_read_data(aux->catalog); + data = msm_dp_catalog_aux_read_data(aux->catalog); for (i = 0; i < len; i++) { - data = dp_catalog_aux_read_data(aux->catalog); + data = msm_dp_catalog_aux_read_data(aux->catalog); *dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff); actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF; @@ -163,7 +163,7 @@ static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, return i; } -static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux, +static void msm_dp_aux_update_offset_and_segment(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *input_msg) { u32 edid_address = 0x50; @@ -185,7 +185,7 @@ static 
void dp_aux_update_offset_and_segment(struct dp_aux_private *aux, } /** - * dp_aux_transfer_helper() - helper function for EDID read transactions + * msm_dp_aux_transfer_helper() - helper function for EDID read transactions * * @aux: DP AUX private structure * @input_msg: input message from DRM upstream APIs @@ -196,7 +196,7 @@ static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux, * This helper function is used to fix EDID reads for non-compliant * sinks that do not handle the i2c middle-of-transaction flag correctly. */ -static void dp_aux_transfer_helper(struct dp_aux_private *aux, +static void msm_dp_aux_transfer_helper(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *input_msg, bool send_seg) { @@ -238,7 +238,7 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux, helper_msg.address = segment_address; helper_msg.buffer = &aux->segment; helper_msg.size = 1; - dp_aux_cmd_fifo_tx(aux, &helper_msg); + msm_dp_aux_cmd_fifo_tx(aux, &helper_msg); } /* @@ -252,7 +252,7 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux, helper_msg.address = input_msg->address; helper_msg.buffer = &aux->offset; helper_msg.size = 1; - dp_aux_cmd_fifo_tx(aux, &helper_msg); + msm_dp_aux_cmd_fifo_tx(aux, &helper_msg); end: aux->offset += message_size; @@ -265,15 +265,15 @@ end: * It will call aux_reset() function to reset the AUX channel, * if the waiting is timeout. */ -static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, +static ssize_t msm_dp_aux_transfer(struct drm_dp_aux *msm_dp_aux, struct drm_dp_aux_msg *msg) { ssize_t ret; int const aux_cmd_native_max = 16; int const aux_cmd_i2c_max = 128; - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); @@ -292,7 +292,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, return -EINVAL; } - ret = pm_runtime_resume_and_get(dp_aux->dev); + ret = pm_runtime_resume_and_get(msm_dp_aux->dev); if (ret) return ret; @@ -313,8 +313,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, goto exit; } - dp_aux_update_offset_and_segment(aux, msg); - dp_aux_transfer_helper(aux, msg, true); + msm_dp_aux_update_offset_and_segment(aux, msg); + msm_dp_aux_transfer_helper(aux, msg, true); aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); aux->cmd_busy = true; @@ -327,7 +327,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, aux->no_send_stop = true; } - ret = dp_aux_cmd_fifo_tx(aux, msg); + ret = msm_dp_aux_cmd_fifo_tx(aux, msg); if (ret < 0) { if (aux->native) { aux->retry_cnt++; @@ -335,14 +335,14 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, phy_calibrate(aux->phy); } /* reset aux if link is in connected state */ - if (dp_catalog_link_is_connected(aux->catalog)) - dp_catalog_aux_reset(aux->catalog); + if (msm_dp_catalog_link_is_connected(aux->catalog)) + msm_dp_catalog_aux_reset(aux->catalog); } else { aux->retry_cnt = 0; switch (aux->aux_error_num) { case DP_AUX_ERR_NONE: if (aux->read) - ret = dp_aux_cmd_fifo_rx(aux, msg); + ret = msm_dp_aux_cmd_fifo_rx(aux, msg); msg->reply = aux->native ? 
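msm_dp_aux_transfer_helper(), documented above, patches over sinks that ignore the i2c middle-of-transaction flag by explicitly programming the E-DDC segment pointer (i2c address 0x30) and the EDID offset before the actual read from the EDID address (0x50). Illustrative only, using the generic drm_dp_aux transfer hook rather than the driver's internal FIFO path:

#include <drm/display/drm_dp_helper.h>

static void example_write_edid_segment(struct drm_dp_aux *aux, u8 *segment)
{
	struct drm_dp_aux_msg msg = {
		.request = DP_AUX_I2C_WRITE,
		.address = 0x30,	/* E-DDC segment pointer */
		.buffer  = segment,
		.size    = 1,
	};

	aux->transfer(aux, &msg);	/* error handling elided */
}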
DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; break; case DP_AUX_ERR_DEFER: @@ -364,24 +364,24 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, exit: mutex_unlock(&aux->mutex); - pm_runtime_put_sync(dp_aux->dev); + pm_runtime_put_sync(msm_dp_aux->dev); return ret; } -irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux) +irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux) { u32 isr; - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - if (!dp_aux) { + if (!msm_dp_aux) { DRM_ERROR("invalid input\n"); return IRQ_NONE; } - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); - isr = dp_catalog_aux_get_irq(aux->catalog); + isr = msm_dp_catalog_aux_get_irq(aux->catalog); /* no interrupts pending, return immediately */ if (!isr) @@ -403,7 +403,7 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux) if (isr & DP_INTR_AUX_ERROR) { aux->aux_error_num = DP_AUX_ERR_PHY; - dp_catalog_aux_clear_hw_interrupts(aux->catalog); + msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog); } else if (isr & DP_INTR_NACK_DEFER) { aux->aux_error_num = DP_AUX_ERR_NACK_DEFER; } else if (isr & DP_INTR_WRONG_ADDR) { @@ -429,68 +429,68 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux) return IRQ_HANDLED; } -void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled) +void msm_dp_aux_enable_xfers(struct drm_dp_aux *msm_dp_aux, bool enabled) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); aux->enable_xfers = enabled; } -void dp_aux_reconfig(struct drm_dp_aux *dp_aux) +void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); phy_calibrate(aux->phy); - dp_catalog_aux_reset(aux->catalog); + msm_dp_catalog_aux_reset(aux->catalog); } -void dp_aux_init(struct drm_dp_aux *dp_aux) +void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - if (!dp_aux) { + if (!msm_dp_aux) { DRM_ERROR("invalid input\n"); return; } - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); mutex_lock(&aux->mutex); - dp_catalog_aux_enable(aux->catalog, true); + msm_dp_catalog_aux_enable(aux->catalog, true); aux->retry_cnt = 0; aux->initted = true; mutex_unlock(&aux->mutex); } -void dp_aux_deinit(struct drm_dp_aux *dp_aux) +void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); mutex_lock(&aux->mutex); aux->initted = false; - dp_catalog_aux_enable(aux->catalog, false); + msm_dp_catalog_aux_enable(aux->catalog, false); mutex_unlock(&aux->mutex); } -int dp_aux_register(struct drm_dp_aux *dp_aux) +int msm_dp_aux_register(struct drm_dp_aux *msm_dp_aux) { int ret; - if (!dp_aux) { + if (!msm_dp_aux) { DRM_ERROR("invalid input\n"); return -EINVAL; } - ret = drm_dp_aux_register(dp_aux); + ret = drm_dp_aux_register(msm_dp_aux); if (ret) { DRM_ERROR("%s: failed to register drm aux: %d\n", __func__, ret); @@ -500,34 +500,34 @@ int dp_aux_register(struct drm_dp_aux *dp_aux) 
return 0; } -void dp_aux_unregister(struct drm_dp_aux *dp_aux) +void msm_dp_aux_unregister(struct drm_dp_aux *msm_dp_aux) { - drm_dp_aux_unregister(dp_aux); + drm_dp_aux_unregister(msm_dp_aux); } -static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux, +static int msm_dp_wait_hpd_asserted(struct drm_dp_aux *msm_dp_aux, unsigned long wait_us) { int ret; - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); ret = pm_runtime_resume_and_get(aux->dev); if (ret) return ret; - ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us); + ret = msm_dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us); pm_runtime_put_sync(aux->dev); return ret; } -struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog, +struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog, struct phy *phy, bool is_edp) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; if (!catalog) { DRM_ERROR("invalid input\n"); @@ -553,23 +553,23 @@ struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog, * before registering AUX with the DRM device so that * msm eDP panel can be detected by generic_dep_panel_probe(). */ - aux->dp_aux.name = "dpu_dp_aux"; - aux->dp_aux.dev = dev; - aux->dp_aux.transfer = dp_aux_transfer; - aux->dp_aux.wait_hpd_asserted = dp_wait_hpd_asserted; - drm_dp_aux_init(&aux->dp_aux); + aux->msm_dp_aux.name = "dpu_dp_aux"; + aux->msm_dp_aux.dev = dev; + aux->msm_dp_aux.transfer = msm_dp_aux_transfer; + aux->msm_dp_aux.wait_hpd_asserted = msm_dp_wait_hpd_asserted; + drm_dp_aux_init(&aux->msm_dp_aux); - return &aux->dp_aux; + return &aux->msm_dp_aux; } -void dp_aux_put(struct drm_dp_aux *dp_aux) +void msm_dp_aux_put(struct drm_dp_aux *msm_dp_aux) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - if (!dp_aux) + if (!msm_dp_aux) return; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); mutex_destroy(&aux->mutex); diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h index 4f65e892a807..39c5b4c8596a 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.h +++ b/drivers/gpu/drm/msm/dp/dp_aux.h @@ -9,18 +9,18 @@ #include "dp_catalog.h" #include <drm/display/drm_dp_helper.h> -int dp_aux_register(struct drm_dp_aux *dp_aux); -void dp_aux_unregister(struct drm_dp_aux *dp_aux); -irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux); -void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled); -void dp_aux_init(struct drm_dp_aux *dp_aux); -void dp_aux_deinit(struct drm_dp_aux *dp_aux); -void dp_aux_reconfig(struct drm_dp_aux *dp_aux); +int msm_dp_aux_register(struct drm_dp_aux *msm_dp_aux); +void msm_dp_aux_unregister(struct drm_dp_aux *msm_dp_aux); +irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux); +void msm_dp_aux_enable_xfers(struct drm_dp_aux *msm_dp_aux, bool enabled); +void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux); +void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux); +void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux); struct phy; -struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog, +struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog, struct phy *phy, bool is_edp); -void dp_aux_put(struct drm_dp_aux *aux); +void msm_dp_aux_put(struct drm_dp_aux *aux); #endif /*__DP_AUX_H_*/ 
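Worth noting while the names change: both the AUX transfer path and msm_dp_wait_hpd_asserted() bracket register access with runtime PM. pm_runtime_resume_and_get() powers the controller up and, on failure, drops the reference it took, so no put is needed on the error path; pm_runtime_put_sync() releases it afterwards:

#include <linux/pm_runtime.h>

static int example_with_rpm(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;	/* reference already dropped on failure */

	/* ... access hardware ... */

	pm_runtime_put_sync(dev);
	return 0;
}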
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index 6e55cbf69674..b4c8856fb25d 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -75,18 +75,18 @@ struct dss_io_data { struct dss_io_region p0; }; -struct dp_catalog_private { +struct msm_dp_catalog_private { struct device *dev; struct drm_device *drm_dev; struct dss_io_data io; u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX]; - struct dp_catalog dp_catalog; + struct msm_dp_catalog msm_dp_catalog; }; -void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state) +void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); struct dss_io_data *dss = &catalog->io; msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb"); @@ -95,12 +95,12 @@ void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *d msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0"); } -static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset) +static inline u32 msm_dp_read_aux(struct msm_dp_catalog_private *catalog, u32 offset) { return readl_relaxed(catalog->io.aux.base + offset); } -static inline void dp_write_aux(struct dp_catalog_private *catalog, +static inline void msm_dp_write_aux(struct msm_dp_catalog_private *catalog, u32 offset, u32 data) { /* @@ -110,12 +110,12 @@ static inline void dp_write_aux(struct dp_catalog_private *catalog, writel(data, catalog->io.aux.base + offset); } -static inline u32 dp_read_ahb(const struct dp_catalog_private *catalog, u32 offset) +static inline u32 msm_dp_read_ahb(const struct msm_dp_catalog_private *catalog, u32 offset) { return readl_relaxed(catalog->io.ahb.base + offset); } -static inline void dp_write_ahb(struct dp_catalog_private *catalog, +static inline void msm_dp_write_ahb(struct msm_dp_catalog_private *catalog, u32 offset, u32 data) { /* @@ -125,7 +125,7 @@ static inline void dp_write_ahb(struct dp_catalog_private *catalog, writel(data, catalog->io.ahb.base + offset); } -static inline void dp_write_p0(struct dp_catalog_private *catalog, +static inline void msm_dp_write_p0(struct msm_dp_catalog_private *catalog, u32 offset, u32 data) { /* @@ -135,7 +135,7 @@ static inline void dp_write_p0(struct dp_catalog_private *catalog, writel(data, catalog->io.p0.base + offset); } -static inline u32 dp_read_p0(struct dp_catalog_private *catalog, +static inline u32 msm_dp_read_p0(struct msm_dp_catalog_private *catalog, u32 offset) { /* @@ -145,12 +145,12 @@ static inline u32 dp_read_p0(struct dp_catalog_private *catalog, return readl_relaxed(catalog->io.p0.base + offset); } -static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset) +static inline u32 msm_dp_read_link(struct msm_dp_catalog_private *catalog, u32 offset) { return readl_relaxed(catalog->io.link.base + offset); } -static inline void dp_write_link(struct dp_catalog_private *catalog, +static inline void msm_dp_write_link(struct msm_dp_catalog_private *catalog, u32 offset, u32 data) { /* @@ -161,64 +161,64 @@ static inline void dp_write_link(struct dp_catalog_private *catalog, } /* aux related catalog functions */ -u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog) +u32 
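The accessor helpers in this file pair readl_relaxed() for reads with plain writel() for writes; the comments carried along by the rename explain why: the relaxed read skips a needless barrier, while writel() guarantees the register write is ordered after preceding memory accesses. In isolation:

#include <linux/io.h>

static inline u32 example_read(void __iomem *base, u32 offset)
{
	/* no barrier needed: plain status/data read */
	return readl_relaxed(base + offset);
}

static inline void example_write(void __iomem *base, u32 offset, u32 data)
{
	/* writel() orders this store after prior normal-memory writes */
	writel(data, base + offset);
}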
msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - return dp_read_aux(catalog, REG_DP_AUX_DATA); + return msm_dp_read_aux(catalog, REG_DP_AUX_DATA); } -int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog, u32 data) +int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_aux(catalog, REG_DP_AUX_DATA, data); + msm_dp_write_aux(catalog, REG_DP_AUX_DATA, data); return 0; } -int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog, u32 data) +int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); + msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); return 0; } -int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read) +int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read) { u32 data; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); if (read) { - data = dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL); + data = msm_dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL); data &= ~DP_AUX_TRANS_CTRL_GO; - dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); + msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); } else { - dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0); + msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0); } return 0; } -int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog) +int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS); - dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f); - dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f); - dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0); + msm_dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS); + msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f); + msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f); + msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0); return 0; } /** - * dp_catalog_aux_reset() - reset AUX controller + * msm_dp_catalog_aux_reset() - reset AUX controller * - * @dp_catalog: DP catalog structure + * @msm_dp_catalog: DP catalog structure * * return: void * @@ -227,47 +227,47 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog) * NOTE: reset AUX controller will also clear any pending HPD related interrupts * */ -void dp_catalog_aux_reset(struct dp_catalog *dp_catalog) +void 
msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog) { u32 aux_ctrl; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL); + aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL); aux_ctrl |= DP_AUX_CTRL_RESET; - dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); + msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); usleep_range(1000, 1100); /* h/w recommended delay */ aux_ctrl &= ~DP_AUX_CTRL_RESET; - dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); + msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); } -void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable) +void msm_dp_catalog_aux_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable) { u32 aux_ctrl; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL); + aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL); if (enable) { - dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff); - dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff); + msm_dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff); + msm_dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff); aux_ctrl |= DP_AUX_CTRL_ENABLE; } else { aux_ctrl &= ~DP_AUX_CTRL_ENABLE; } - dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); + msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); } -int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog, +int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog, unsigned long wait_us) { u32 state; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); /* poll for hpd connected status every 2ms and timeout after wait_us */ return readl_poll_timeout(catalog->io.aux.base + @@ -294,10 +294,10 @@ static void dump_regs(void __iomem *base, int len) } } -void dp_catalog_dump_regs(struct dp_catalog *dp_catalog) +void msm_dp_catalog_dump_regs(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); struct dss_io_data *io = &catalog->io; pr_info("AHB regs\n"); @@ -313,17 +313,17 @@ void dp_catalog_dump_regs(struct dp_catalog *dp_catalog) dump_regs(io->p0.base, io->p0.len); } -u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 intr, intr_ack; - intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS); + intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS); intr &= ~DP_INTERRUPT_STATUS1_MASK; intr_ack = (intr & DP_INTERRUPT_STATUS1) << DP_INTERRUPT_STATUS_ACK_SHIFT; - dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack | + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, 
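Note the waiting style used throughout this file: msm_dp_catalog_aux_wait_for_hpd_connect_state() above, and msm_dp_catalog_ctrl_mainlink_ready() below, use readl_poll_timeout() instead of hand-rolled loops, sampling a register at a fixed interval until a condition holds or the timeout expires. The general shape, with illustrative values:

#include <linux/iopoll.h>

static int example_wait_ready(void __iomem *reg, u32 ready_bit,
			      unsigned long timeout_us)
{
	u32 state;

	/* sample every 2 ms until ready_bit is set or timeout_us elapses */
	return readl_poll_timeout(reg, state,
				  state & ready_bit, 2000, timeout_us);
}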
intr_ack | DP_INTERRUPT_STATUS1_MASK); return intr; @@ -331,40 +331,40 @@ u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog) } /* controller related catalog functions */ -void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog, - u32 dp_tu, u32 valid_boundary, +void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog, + u32 msm_dp_tu, u32 valid_boundary, u32 valid_boundary2) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary); - dp_write_link(catalog, REG_DP_TU, dp_tu); - dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2); + msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary); + msm_dp_write_link(catalog, REG_DP_TU, msm_dp_tu); + msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2); } -void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state) +void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_link(catalog, REG_DP_STATE_CTRL, state); + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, state); } -void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 cfg) +void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 cfg) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); drm_dbg_dp(catalog->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", cfg); - dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg); + msm_dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg); } -void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */ u32 ln_mapping; @@ -373,71 +373,71 @@ void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog) ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT; ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT; - dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING, + msm_dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING, ln_mapping); } -void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable) { u32 val; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - val = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + val = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); if (enable) val |= DP_MAINLINK_CTRL_ENABLE; else val &= ~DP_MAINLINK_CTRL_ENABLE; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val); + 
msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val); } -void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog, bool enable) { u32 mainlink_ctrl; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); drm_dbg_dp(catalog->drm_dev, "enable=%d\n", enable); if (enable) { /* * To make sure link reg writes happens before other operation, - * dp_write_link() function uses writel() + * msm_dp_write_link() function uses writel() */ - mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET | DP_MAINLINK_CTRL_ENABLE); - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); mainlink_ctrl |= DP_MAINLINK_CTRL_RESET; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE | DP_MAINLINK_FB_BOUNDARY_SEL); - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); } else { - mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); } } -void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog, u32 colorimetry_cfg, u32 test_bits_depth) { u32 misc_val; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - misc_val = dp_read_link(catalog, REG_DP_MISC1_MISC0); + misc_val = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0); /* clear bpp bits */ misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT); @@ -447,27 +447,27 @@ void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, misc_val |= DP_MISC0_SYNCHRONOUS_CLK; drm_dbg_dp(catalog->drm_dev, "misc settings = 0x%x\n", misc_val); - dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val); + msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val); } -void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog) +void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog) { u32 mainlink_ctrl, hw_revision; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); - hw_revision = dp_catalog_hw_revision(dp_catalog); + hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog); if (hw_revision >= DP_HW_VERSION_1_2) mainlink_ctrl |= 
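msm_dp_catalog_ctrl_mainlink_ctrl() above brings the link up via an explicit reset pulse (clear enable, assert DP_MAINLINK_CTRL_RESET, deassert, then set the enable bits), and the AUX and SW reset paths in this file use the same set/delay/clear idiom. A generic sketch, with the delay taken from the driver's own "h/w recommended" comment:

#include <linux/delay.h>
#include <linux/io.h>

static void example_reset_pulse(void __iomem *reg, u32 reset_bit)
{
	u32 val = readl_relaxed(reg);

	writel(val | reset_bit, reg);	/* assert reset */
	usleep_range(1000, 1100);	/* h/w recommended delay */
	writel(val & ~reset_bit, reg);	/* de-assert */
}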
DP_MAINLINK_FLUSH_MODE_SDE_PERIPH_UPDATE; else mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_UPDATE_SDP; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); } -void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog, u32 rate, u32 stream_rate_khz, bool is_ycbcr_420) { @@ -478,8 +478,8 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 const link_rate_hbr3 = 810000; unsigned long den, num; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); if (rate == link_rate_hbr3) pixel_div = 6; @@ -522,22 +522,22 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, nvid *= 3; drm_dbg_dp(catalog->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid); - dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid); - dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid); - dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0); + msm_dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid); + msm_dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid); + msm_dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0); } -int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, +int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog, u32 state_bit) { int bit, ret; u32 data; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); bit = BIT(state_bit - 1); drm_dbg_dp(catalog->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit); - dp_catalog_ctrl_state_ctrl(dp_catalog, bit); + msm_dp_catalog_ctrl_state_ctrl(msm_dp_catalog, bit); bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT; @@ -554,25 +554,25 @@ int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, } /** - * dp_catalog_hw_revision() - retrieve DP hw revision + * msm_dp_catalog_hw_revision() - retrieve DP hw revision * - * @dp_catalog: DP catalog structure + * @msm_dp_catalog: DP catalog structure * * Return: DP controller hw revision * */ -u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog) { - const struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + const struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - return dp_read_ahb(catalog, REG_DP_HW_VERSION); + return msm_dp_read_ahb(catalog, REG_DP_HW_VERSION); } /** - * dp_catalog_ctrl_reset() - reset DP controller + * msm_dp_catalog_ctrl_reset() - reset DP controller * - * @dp_catalog: DP catalog structure + * @msm_dp_catalog: DP catalog structure * * return: void * @@ -581,28 +581,28 @@ u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog) * NOTE: reset DP controller will also clear any pending HPD related interrupts * */ -void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog) { u32 sw_reset; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + 
struct msm_dp_catalog_private, msm_dp_catalog); - sw_reset = dp_read_ahb(catalog, REG_DP_SW_RESET); + sw_reset = msm_dp_read_ahb(catalog, REG_DP_SW_RESET); sw_reset |= DP_SW_RESET; - dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); + msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); usleep_range(1000, 1100); /* h/w recommended delay */ sw_reset &= ~DP_SW_RESET; - dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); + msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); } -bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog) +bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog) { u32 data; int ret; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); /* Poll for mainlink ready status */ ret = readl_poll_timeout(catalog->io.link.base + @@ -617,96 +617,96 @@ bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog) return true; } -void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog, bool enable) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); if (enable) { - dp_write_ahb(catalog, REG_DP_INTR_STATUS, + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, DP_INTERRUPT_STATUS1_MASK); - dp_write_ahb(catalog, REG_DP_INTR_STATUS2, + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2, DP_INTERRUPT_STATUS2_MASK); } else { - dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00); - dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00); + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00); + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00); } } -void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, +void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog, u32 intr_mask, bool en) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - u32 config = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); + u32 config = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); config = (en ? 
config | intr_mask : config & ~intr_mask); drm_dbg_dp(catalog->drm_dev, "intr_mask=%#x config=%#x\n", intr_mask, config); - dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK, + msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK, config & DP_DP_HPD_INT_MASK); } -void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); + u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); /* Configure REFTIMER and enable it */ reftimer |= DP_DP_HPD_REFTIMER_ENABLE; - dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); /* Enable HPD */ - dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN); } -void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); + u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); reftimer &= ~DP_DP_HPD_REFTIMER_ENABLE; - dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); - dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0); } -static void dp_catalog_enable_sdp(struct dp_catalog_private *catalog) +static void msm_dp_catalog_enable_sdp(struct msm_dp_catalog_private *catalog) { /* trigger sdp */ - dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP); - dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0); } -void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 config; /* enable PSR1 function */ - config = dp_read_link(catalog, REG_PSR_CONFIG); + config = msm_dp_read_link(catalog, REG_PSR_CONFIG); config |= PSR1_SUPPORTED; - dp_write_link(catalog, REG_PSR_CONFIG, config); + msm_dp_write_link(catalog, REG_PSR_CONFIG, config); - dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4); - dp_catalog_enable_sdp(catalog); + msm_dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4); + msm_dp_catalog_enable_sdp(catalog); } -void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter) +void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 cmd; - cmd = dp_read_link(catalog, 
REG_PSR_CMD); + cmd = msm_dp_read_link(catalog, REG_PSR_CMD); cmd &= ~(PSR_ENTER | PSR_EXIT); @@ -715,17 +715,17 @@ void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter) else cmd |= PSR_EXIT; - dp_catalog_enable_sdp(catalog); - dp_write_link(catalog, REG_PSR_CMD, cmd); + msm_dp_catalog_enable_sdp(catalog); + msm_dp_write_link(catalog, REG_PSR_CMD, cmd); } -u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 status; - status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); + status = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); drm_dbg_dp(catalog->drm_dev, "aux status: %#x\n", status); status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT; status &= DP_DP_HPD_STATE_STATUS_BITS_MASK; @@ -733,16 +733,16 @@ u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog) return status; } -u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); int isr, mask; - isr = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); - dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK, + isr = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK, (isr & DP_DP_HPD_INT_MASK)); - mask = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); + mask = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); /* * We only want to return interrupts that are unmasked to the caller. 
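The two hunks around msm_dp_catalog_hpd_get_intr_status() preserve a subtle read/ack idiom: pending bits are acknowledged as soon as the status register is read, and only bits the caller left unmasked are reported, while status bits outside the maskable range always pass through. A minimal, self-contained sketch of that idiom (plain C with stand-in register variables and an assumed mask value, not the driver's MMIO accessors):

#include <stdint.h>
#include <stdio.h>

#define HPD_INT_MASK 0x0fu /* maskable interrupt bits (stand-in value) */

static uint32_t reg_status, reg_ack, reg_mask; /* stand-ins for MMIO registers */

static uint32_t hpd_get_intr_status(void)
{
	uint32_t isr = reg_status;

	reg_ack = isr & HPD_INT_MASK; /* ack pending maskable bits immediately */

	/* report unmasked interrupts plus the non-maskable status bits */
	return isr & (reg_mask | ~HPD_INT_MASK);
}

int main(void)
{
	reg_status = 0x13; /* bits 0 and 1 pending (maskable), bit 4 (status-only) */
	reg_mask = 0x01;   /* only bit 0 unmasked */
	printf("reported: %#x\n", hpd_get_intr_status()); /* prints 0x11 */
	return 0;
}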
@@ -754,115 +754,115 @@ u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog) return isr & (mask | ~DP_DP_HPD_INT_MASK); } -u32 dp_catalog_ctrl_read_psr_interrupt_status(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 intr, intr_ack; - intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS4); + intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS4); intr_ack = (intr & DP_INTERRUPT_STATUS4) << DP_INTERRUPT_STATUS_ACK_SHIFT; - dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack); + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack); return intr; } -int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog) +int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 intr, intr_ack; - intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS2); + intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS2); intr &= ~DP_INTERRUPT_STATUS2_MASK; intr_ack = (intr & DP_INTERRUPT_STATUS2) << DP_INTERRUPT_STATUS_ACK_SHIFT; - dp_write_ahb(catalog, REG_DP_INTR_STATUS2, + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2, intr_ack | DP_INTERRUPT_STATUS2_MASK); return intr; } -void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_ahb(catalog, REG_DP_PHY_CTRL, + msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL, DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL); usleep_range(1000, 1100); /* h/w recommended delay */ - dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0); + msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0); } -void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog, u32 pattern) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 value = 0x0; /* Make sure to clear the current pattern before starting a new one */ - dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0); + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0); drm_dbg_dp(catalog->drm_dev, "pattern: %#x\n", pattern); switch (pattern) { case DP_PHY_TEST_PATTERN_D10_2: - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_TRAINING_PATTERN1); break; case DP_PHY_TEST_PATTERN_ERROR_COUNT: value &= ~(1 << 16); - dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); value |= SCRAMBLER_RESET_COUNT_VALUE; - dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); - dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, + 
msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2); - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE); break; case DP_PHY_TEST_PATTERN_PRBS7: - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_PRBS7); break; case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN); /* 00111110000011111000001111100000 */ - dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0, + msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0, 0x3E0F83E0); /* 00001111100000111110000011111000 */ - dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1, + msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1, 0x0F83E0F8); /* 1111100000111110 */ - dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2, + msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2, 0x0000F83E); break; case DP_PHY_TEST_PATTERN_CP2520: - value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); value = DP_HBR2_ERM_PATTERN; - dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); value |= SCRAMBLER_RESET_COUNT_VALUE; - dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); - dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, + msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2); - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE); - value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); value |= DP_MAINLINK_CTRL_ENABLE; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); break; case DP_PHY_TEST_PATTERN_SEL_MASK: - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, DP_MAINLINK_CTRL_ENABLE); - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_TRAINING_PATTERN4); break; default: @@ -872,94 +872,94 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, } } -u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - return dp_read_link(catalog, REG_DP_MAINLINK_READY); + return msm_dp_read_link(catalog, REG_DP_MAINLINK_READY); } /* panel related catalog functions */ -int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog, u32 total, - u32 sync_start, u32 width_blanking, u32 dp_active) +int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total, + u32 sync_start, u32 width_blanking, u32 msm_dp_active) { - struct dp_catalog_private *catalog = 
container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 reg; - dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total); - dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start); - dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking); - dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_active); + msm_dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total); + msm_dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start); + msm_dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking); + msm_dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, msm_dp_active); - reg = dp_read_p0(catalog, MMSS_DP_INTF_CONFIG); + reg = msm_dp_read_p0(catalog, MMSS_DP_INTF_CONFIG); - if (dp_catalog->wide_bus_en) + if (msm_dp_catalog->wide_bus_en) reg |= DP_INTF_CONFIG_DATABUS_WIDEN; else reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN; - DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", dp_catalog->wide_bus_en, reg); + DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", msm_dp_catalog->wide_bus_en, reg); - dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg); + msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg); return 0; } -static void dp_catalog_panel_send_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp) +static void msm_dp_catalog_panel_send_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 header[2]; u32 val; int i; - catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog); - dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header); + msm_dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header); - dp_write_link(catalog, MMSS_DP_GENERIC0_0, header[0]); - dp_write_link(catalog, MMSS_DP_GENERIC0_1, header[1]); + msm_dp_write_link(catalog, MMSS_DP_GENERIC0_0, header[0]); + msm_dp_write_link(catalog, MMSS_DP_GENERIC0_1, header[1]); for (i = 0; i < sizeof(vsc_sdp->db); i += 4) { val = ((vsc_sdp->db[i]) | (vsc_sdp->db[i + 1] << 8) | (vsc_sdp->db[i + 2] << 16) | (vsc_sdp->db[i + 3] << 24)); - dp_write_link(catalog, MMSS_DP_GENERIC0_2 + i, val); + msm_dp_write_link(catalog, MMSS_DP_GENERIC0_2 + i, val); } } -static void dp_catalog_panel_update_sdp(struct dp_catalog *dp_catalog) +static void msm_dp_catalog_panel_update_sdp(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 hw_revision; - catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog); - hw_revision = dp_catalog_hw_revision(dp_catalog); + hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog); if (hw_revision < DP_HW_VERSION_1_2 && hw_revision >= DP_HW_VERSION_1_0) { - dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x01); - dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x00); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x01); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x00); } } -void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp) +void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 cfg, cfg2, misc; - catalog = container_of(dp_catalog, struct 
dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog); - cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG); - cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2); - misc = dp_read_link(catalog, REG_DP_MISC1_MISC0); + cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG); + cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2); + misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0); cfg |= GEN0_SDP_EN; - dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg); cfg2 |= GENERIC0_SDPSIZE_VALID; - dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2); - dp_catalog_panel_send_vsc_sdp(dp_catalog, vsc_sdp); + msm_dp_catalog_panel_send_vsc_sdp(msm_dp_catalog, vsc_sdp); /* indicates presence of VSC (BIT(6) of MISC1) */ misc |= DP_MISC1_VSC_SDP; @@ -967,27 +967,27 @@ void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sd drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=1\n"); pr_debug("misc settings = 0x%x\n", misc); - dp_write_link(catalog, REG_DP_MISC1_MISC0, misc); + msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc); - dp_catalog_panel_update_sdp(dp_catalog); + msm_dp_catalog_panel_update_sdp(msm_dp_catalog); } -void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog) +void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 cfg, cfg2, misc; - catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog); - cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG); - cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2); - misc = dp_read_link(catalog, REG_DP_MISC1_MISC0); + cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG); + cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2); + misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0); cfg &= ~GEN0_SDP_EN; - dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg); cfg2 &= ~GENERIC0_SDPSIZE_VALID; - dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2); /* switch back to MSA */ misc &= ~DP_MISC1_VSC_SDP; @@ -995,16 +995,16 @@ void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog) drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=0\n"); pr_debug("misc settings = 0x%x\n", misc); - dp_write_link(catalog, REG_DP_MISC1_MISC0, misc); + msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc); - dp_catalog_panel_update_sdp(dp_catalog); + msm_dp_catalog_panel_update_sdp(msm_dp_catalog); } -void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, +void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog, struct drm_display_mode *drm_mode) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 hsync_period, vsync_period; u32 display_v_start, display_v_end; u32 hsync_start_x, hsync_end_x; @@ -1036,49 +1036,49 @@ void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, display_hctl = (hsync_end_x << 16) | hsync_start_x; - dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0); - dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl); - dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period * + 
msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl); + msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period); - dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width * + msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width * hsync_period); - dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0); - dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start); - dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end); - dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0); - - dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, + msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0); + msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start); + msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end); + msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0); + + msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, DP_TPG_CHECKERED_RECT_PATTERN); - dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG, + msm_dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG, DP_TPG_VIDEO_CONFIG_BPP_8BIT | DP_TPG_VIDEO_CONFIG_RGB); - dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, + msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, DP_BIST_ENABLE_DPBIST_EN); - dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, + msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, DP_TIMING_ENGINE_EN_EN); drm_dbg_dp(catalog->drm_dev, "%s: enabled tpg\n", __func__); } -void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog) +void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0); - dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0); - dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0); + msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0); + msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0); + msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0); } -static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len) +static void __iomem *msm_dp_ioremap(struct platform_device *pdev, int idx, size_t *len) { struct resource *res; void __iomem *base; @@ -1090,21 +1090,21 @@ static void __iomem 
*dp_ioremap(struct platform_device *pdev, int idx, size_t *l return base; } -static int dp_catalog_get_io(struct dp_catalog_private *catalog) +static int msm_dp_catalog_get_io(struct msm_dp_catalog_private *catalog) { struct platform_device *pdev = to_platform_device(catalog->dev); struct dss_io_data *dss = &catalog->io; - dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len); + dss->ahb.base = msm_dp_ioremap(pdev, 0, &dss->ahb.len); if (IS_ERR(dss->ahb.base)) return PTR_ERR(dss->ahb.base); - dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len); + dss->aux.base = msm_dp_ioremap(pdev, 1, &dss->aux.len); if (IS_ERR(dss->aux.base)) { /* * The initial binding had a single reg, but in order to * support variation in the sub-region sizes this was split. - * dp_ioremap() will fail with -EINVAL here if only a single + * msm_dp_ioremap() will fail with -EINVAL here if only a single * reg is specified, so fill in the sub-region offsets and * lengths based on this single region. */ @@ -1126,13 +1126,13 @@ static int dp_catalog_get_io(struct dp_catalog_private *catalog) return PTR_ERR(dss->aux.base); } } else { - dss->link.base = dp_ioremap(pdev, 2, &dss->link.len); + dss->link.base = msm_dp_ioremap(pdev, 2, &dss->link.len); if (IS_ERR(dss->link.base)) { DRM_ERROR("unable to remap link region: %pe\n", dss->link.base); return PTR_ERR(dss->link.base); } - dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len); + dss->p0.base = msm_dp_ioremap(pdev, 3, &dss->p0.len); if (IS_ERR(dss->p0.base)) { DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base); return PTR_ERR(dss->p0.base); @@ -1142,9 +1142,9 @@ static int dp_catalog_get_io(struct dp_catalog_private *catalog) return 0; } -struct dp_catalog *dp_catalog_get(struct device *dev) +struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; int ret; catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL); @@ -1153,78 +1153,78 @@ struct dp_catalog *dp_catalog_get(struct device *dev) catalog->dev = dev; - ret = dp_catalog_get_io(catalog); + ret = msm_dp_catalog_get_io(catalog); if (ret) return ERR_PTR(ret); - return &catalog->dp_catalog; + return &catalog->msm_dp_catalog; } -u32 dp_catalog_audio_get_header(struct dp_catalog *dp_catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header) +u32 msm_dp_catalog_audio_get_header(struct msm_dp_catalog *msm_dp_catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); sdp_map = catalog->audio_map; - return dp_read_link(catalog, sdp_map[sdp][header]); + return msm_dp_read_link(catalog, sdp_map[sdp][header]); } -void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header, +void msm_dp_catalog_audio_set_header(struct msm_dp_catalog *msm_dp_catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header, u32 data) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + 
catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); sdp_map = catalog->audio_map; - dp_write_link(catalog, sdp_map[sdp][header], data); + msm_dp_write_link(catalog, sdp_map[sdp][header], data); } -void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog, u32 select) +void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *msm_dp_catalog, u32 select) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 acr_ctrl; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14); drm_dbg_dp(catalog->drm_dev, "select: %#x, acr_ctrl: %#x\n", select, acr_ctrl); - dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); + msm_dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); } -void dp_catalog_audio_enable(struct dp_catalog *dp_catalog, bool enable) +void msm_dp_catalog_audio_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 audio_ctrl; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG); + audio_ctrl = msm_dp_read_link(catalog, MMSS_DP_AUDIO_CFG); if (enable) audio_ctrl |= BIT(0); @@ -1233,24 +1233,24 @@ void dp_catalog_audio_enable(struct dp_catalog *dp_catalog, bool enable) drm_dbg_dp(catalog->drm_dev, "dp_audio_cfg = 0x%x\n", audio_ctrl); - dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl); + msm_dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl); /* make sure audio engine is disabled */ wmb(); } -void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog) +void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 sdp_cfg = 0; u32 sdp_cfg2 = 0; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - sdp_cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG); + sdp_cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG); /* AUDIO_TIMESTAMP_SDP_EN */ sdp_cfg |= BIT(1); /* AUDIO_STREAM_SDP_EN */ @@ -1264,9 +1264,9 @@ void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog) drm_dbg_dp(catalog->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg); - dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg); - sdp_cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2); + sdp_cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2); /* IFRM_REGSRC -> Do not use reg values */ sdp_cfg2 &= ~BIT(0); /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */ @@ -1274,12 +1274,12 @@ void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog) drm_dbg_dp(catalog->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2); - dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2); } -void dp_catalog_audio_init(struct dp_catalog *dp_catalog) +void msm_dp_catalog_audio_init(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; 
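	/*
	 * Aside on the table below (descriptive only, no behavior change):
	 * sdp_map[][] is indexed as sdp_map[sdp][header] using the renamed
	 * enums msm_dp_catalog_audio_sdp_type and
	 * msm_dp_catalog_audio_header_type, and stores the link-register
	 * offset of each audio SDP header word. With the table installed,
	 * msm_dp_catalog_audio_get_header() and ..._set_header() reduce to a
	 * single msm_dp_read_link()/msm_dp_write_link() at sdp_map[sdp][header].
	 */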
static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = { { @@ -1309,27 +1309,27 @@ void dp_catalog_audio_init(struct dp_catalog *dp_catalog) }, }; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); catalog->audio_map = sdp_map; } -void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog, u32 safe_to_exit_level) +void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *msm_dp_catalog, u32 safe_to_exit_level) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 mainlink_levels; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS); + mainlink_levels = msm_dp_read_link(catalog, REG_DP_MAINLINK_LEVELS); mainlink_levels &= 0xFE0; mainlink_levels |= safe_to_exit_level; @@ -1337,5 +1337,5 @@ void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog, u32 safe_to_exit_ "mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n", mainlink_levels, safe_to_exit_level); - dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels); + msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels); } diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h index 4679d50b8c73..e932b17eecbf 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.h +++ b/drivers/gpu/drm/msm/dp/dp_catalog.h @@ -31,7 +31,7 @@ #define DP_HW_VERSION_1_0 0x10000000 #define DP_HW_VERSION_1_2 0x10020000 -enum dp_catalog_audio_sdp_type { +enum msm_dp_catalog_audio_sdp_type { DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_INFOFRAME, @@ -40,89 +40,89 @@ enum dp_catalog_audio_sdp_type { DP_AUDIO_SDP_MAX, }; -enum dp_catalog_audio_header_type { +enum msm_dp_catalog_audio_header_type { DP_AUDIO_SDP_HEADER_1, DP_AUDIO_SDP_HEADER_2, DP_AUDIO_SDP_HEADER_3, DP_AUDIO_SDP_HEADER_MAX, }; -struct dp_catalog { +struct msm_dp_catalog { bool wide_bus_en; }; /* Debug module */ -void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state); +void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state); /* AUX APIs */ -u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog); -int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog, u32 data); -int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog, u32 data); -int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read); -int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog); -void dp_catalog_aux_reset(struct dp_catalog *dp_catalog); -void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable); -int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog, +u32 msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog); +int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data); +int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data); +int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read); +int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_aux_enable(struct msm_dp_catalog 
*msm_dp_catalog, bool enable); +int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog, unsigned long wait_us); -u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog); +u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog); /* DP Controller APIs */ -void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state); -void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config); -void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable); -void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog, bool enable); -void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb); -void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate, +void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state); +void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 config); +void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog, bool enable); +void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable); +void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog, u32 cc, u32 tb); +void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog, u32 rate, u32 stream_rate_khz, bool is_ycbcr_420); -int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, u32 pattern); -u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog); -bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable); -void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, +int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog, u32 pattern); +u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog); +bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog, bool enable); +void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog, u32 intr_mask, bool en); -void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter); -u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog); -u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog); -int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog); -u32 dp_catalog_ctrl_read_psr_interrupt_status(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog, - u32 dp_tu, u32 valid_boundary, +void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog 
*msm_dp_catalog); +void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter); +u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog); +u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog); +int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog); +u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog, + u32 msm_dp_tu, u32 valid_boundary, u32 valid_boundary2); -void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog, u32 pattern); -u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog); +u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog); /* DP Panel APIs */ -int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog, u32 total, - u32 sync_start, u32 width_blanking, u32 dp_active); -void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp); -void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog); -void dp_catalog_dump_regs(struct dp_catalog *dp_catalog); -void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, +int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total, + u32 sync_start, u32 width_blanking, u32 msm_dp_active); +void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp); +void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_dump_regs(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog, struct drm_display_mode *drm_mode); -void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog); +void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog); -struct dp_catalog *dp_catalog_get(struct device *dev); +struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev); /* DP Audio APIs */ -u32 dp_catalog_audio_get_header(struct dp_catalog *dp_catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header); -void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header, +u32 msm_dp_catalog_audio_get_header(struct msm_dp_catalog *msm_dp_catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header); +void msm_dp_catalog_audio_set_header(struct msm_dp_catalog *msm_dp_catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header, u32 data); -void dp_catalog_audio_config_acr(struct dp_catalog *catalog, u32 select); -void dp_catalog_audio_enable(struct dp_catalog *catalog, bool enable); -void dp_catalog_audio_config_sdp(struct dp_catalog *catalog); -void dp_catalog_audio_init(struct dp_catalog *catalog); -void dp_catalog_audio_sfe_level(struct dp_catalog *catalog, u32 safe_to_exit_level); +void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *catalog, u32 select); +void msm_dp_catalog_audio_enable(struct msm_dp_catalog *catalog, bool enable); +void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *catalog); +void msm_dp_catalog_audio_init(struct msm_dp_catalog *catalog); +void msm_dp_catalog_audio_sfe_level(struct 
msm_dp_catalog *catalog, u32 safe_to_exit_level); #endif /* _DP_CATALOG_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index f342fc5ae41e..bc2ca8133b79 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -40,7 +40,7 @@ enum { DP_TRAINING_2, }; -struct dp_tu_calc_input { +struct msm_dp_tu_calc_input { u64 lclk; /* 162, 270, 540 and 810 */ u64 pclk_khz; /* in KHz */ u64 hactive; /* active h-width */ @@ -55,7 +55,7 @@ struct dp_tu_calc_input { int num_of_dsc_slices; /* number of slices per line */ }; -struct dp_vc_tu_mapping_table { +struct msm_dp_vc_tu_mapping_table { u32 vic; u8 lanes; u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */ @@ -69,14 +69,14 @@ struct dp_vc_tu_mapping_table { u8 tu_size_minus1; }; -struct dp_ctrl_private { - struct dp_ctrl dp_ctrl; +struct msm_dp_ctrl_private { + struct msm_dp_ctrl msm_dp_ctrl; struct drm_device *drm_dev; struct device *dev; struct drm_dp_aux *aux; - struct dp_panel *panel; - struct dp_link *link; - struct dp_catalog *catalog; + struct msm_dp_panel *panel; + struct msm_dp_link *link; + struct msm_dp_catalog *catalog; struct phy *phy; @@ -99,8 +99,8 @@ struct dp_ctrl_private { bool stream_clks_on; }; -static int dp_aux_link_configure(struct drm_dp_aux *aux, - struct dp_link_info *link) +static int msm_dp_aux_link_configure(struct drm_dp_aux *aux, + struct msm_dp_link_info *link) { u8 values[2]; int err; @@ -118,14 +118,14 @@ static int dp_aux_link_configure(struct drm_dp_aux *aux, return 0; } -void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); reinit_completion(&ctrl->idle_comp); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE); if (!wait_for_completion_timeout(&ctrl->idle_comp, IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES)) @@ -134,7 +134,7 @@ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl) drm_dbg_dp(ctrl->drm_dev, "mainlink off\n"); } -static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) +static void msm_dp_ctrl_config_ctrl(struct msm_dp_ctrl_private *ctrl) { u32 config = 0, tbd; const u8 *dpcd = ctrl->panel->dpcd; @@ -142,15 +142,15 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) /* Default-> LSCLK DIV: 1/4 LCLK */ config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT); - if (ctrl->panel->dp_mode.out_fmt_is_yuv_420) + if (ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420) config |= DP_CONFIGURATION_CTRL_RGB_YUV; /* YUV420 */ /* Scrambler reset enable */ if (drm_dp_alternate_scrambler_reset_cap(dpcd)) config |= DP_CONFIGURATION_CTRL_ASSR; - tbd = dp_link_get_test_bits_depth(ctrl->link, - ctrl->panel->dp_mode.bpp); + tbd = msm_dp_link_get_test_bits_depth(ctrl->link, + ctrl->panel->msm_dp_mode.bpp); config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT; @@ -170,24 +170,24 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) if (ctrl->panel->psr_cap.version) config |= DP_CONFIGURATION_CTRL_SEND_VSC; - dp_catalog_ctrl_config_ctrl(ctrl->catalog, config); + msm_dp_catalog_ctrl_config_ctrl(ctrl->catalog, config); } -static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl) +static void msm_dp_ctrl_configure_source_params(struct msm_dp_ctrl_private *ctrl) { u32 cc, 
tb; - dp_catalog_ctrl_lane_mapping(ctrl->catalog); - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); - dp_catalog_setup_peripheral_flush(ctrl->catalog); + msm_dp_catalog_ctrl_lane_mapping(ctrl->catalog); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); + msm_dp_catalog_setup_peripheral_flush(ctrl->catalog); - dp_ctrl_config_ctrl(ctrl); + msm_dp_ctrl_config_ctrl(ctrl); - tb = dp_link_get_test_bits_depth(ctrl->link, - ctrl->panel->dp_mode.bpp); - cc = dp_link_get_colorimetry_config(ctrl->link); - dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb); - dp_panel_timing_cfg(ctrl->panel); + tb = msm_dp_link_get_test_bits_depth(ctrl->link, + ctrl->panel->msm_dp_mode.bpp); + cc = msm_dp_link_get_colorimetry_config(ctrl->link); + msm_dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb); + msm_dp_panel_timing_cfg(ctrl->panel); } /* @@ -310,7 +310,7 @@ static int _tu_param_compare(s64 a, s64 b) } } -static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in, +static void msm_dp_panel_update_tu_timings(struct msm_dp_tu_calc_input *in, struct tu_algo_data *tu) { int nlanes = in->nlanes; @@ -622,9 +622,9 @@ static void _tu_valid_boundary_calc(struct tu_algo_data *tu) } } -static void _dp_ctrl_calc_tu(struct dp_ctrl_private *ctrl, - struct dp_tu_calc_input *in, - struct dp_vc_tu_mapping_table *tu_table) +static void _dp_ctrl_calc_tu(struct msm_dp_ctrl_private *ctrl, + struct msm_dp_tu_calc_input *in, + struct msm_dp_vc_tu_mapping_table *tu_table) { struct tu_algo_data *tu; int compare_result_1, compare_result_2; @@ -645,7 +645,7 @@ static void _dp_ctrl_calc_tu(struct dp_ctrl_private *ctrl, if (!tu) return; - dp_panel_update_tu_timings(in, tu); + msm_dp_panel_update_tu_timings(in, tu); tu->err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */ @@ -956,21 +956,21 @@ tu_size_calc: kfree(tu); } -static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl, - struct dp_vc_tu_mapping_table *tu_table) +static void msm_dp_ctrl_calc_tu_parameters(struct msm_dp_ctrl_private *ctrl, + struct msm_dp_vc_tu_mapping_table *tu_table) { - struct dp_tu_calc_input in; + struct msm_dp_tu_calc_input in; struct drm_display_mode *drm_mode; - drm_mode = &ctrl->panel->dp_mode.drm_mode; + drm_mode = &ctrl->panel->msm_dp_mode.drm_mode; in.lclk = ctrl->link->link_params.rate / 1000; in.pclk_khz = drm_mode->clock; in.hactive = drm_mode->hdisplay; in.hporch = drm_mode->htotal - drm_mode->hdisplay; in.nlanes = ctrl->link->link_params.num_lanes; - in.bpp = ctrl->panel->dp_mode.bpp; - in.pixel_enc = ctrl->panel->dp_mode.out_fmt_is_yuv_420 ? 420 : 444; + in.bpp = ctrl->panel->msm_dp_mode.bpp; + in.pixel_enc = ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420 ? 
420 : 444; in.dsc_en = 0; in.async_en = 0; in.fec_en = 0; @@ -980,16 +980,16 @@ static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl, _dp_ctrl_calc_tu(ctrl, &in, tu_table); } -static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl) +static void msm_dp_ctrl_setup_tr_unit(struct msm_dp_ctrl_private *ctrl) { - u32 dp_tu = 0x0; + u32 msm_dp_tu = 0x0; u32 valid_boundary = 0x0; u32 valid_boundary2 = 0x0; - struct dp_vc_tu_mapping_table tu_calc_table; + struct msm_dp_vc_tu_mapping_table tu_calc_table; - dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table); + msm_dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table); - dp_tu |= tu_calc_table.tu_size_minus1; + msm_dp_tu |= tu_calc_table.tu_size_minus1; valid_boundary |= tu_calc_table.valid_boundary_link; valid_boundary |= (tu_calc_table.delay_start_link << 16); @@ -1001,13 +1001,13 @@ static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl) valid_boundary2 |= BIT(0); pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n", - dp_tu, valid_boundary, valid_boundary2); + msm_dp_tu, valid_boundary, valid_boundary2); - dp_catalog_ctrl_update_transfer_unit(ctrl->catalog, - dp_tu, valid_boundary, valid_boundary2); + msm_dp_catalog_ctrl_update_transfer_unit(ctrl->catalog, + msm_dp_tu, valid_boundary, valid_boundary2); } -static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_wait4video_ready(struct msm_dp_ctrl_private *ctrl) { int ret = 0; @@ -1019,7 +1019,7 @@ static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl) return ret; } -static int dp_ctrl_set_vx_px(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_set_vx_px(struct msm_dp_ctrl_private *ctrl, u8 v_level, u8 p_level) { union phy_configure_opts *phy_opts = &ctrl->phy_opts; @@ -1034,9 +1034,9 @@ static int dp_ctrl_set_vx_px(struct dp_ctrl_private *ctrl, return 0; } -static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl) { - struct dp_link *link = ctrl->link; + struct msm_dp_link *link = ctrl->link; int ret = 0, lane, lane_cnt; u8 buf[4]; u32 max_level_reached = 0; @@ -1046,7 +1046,7 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl) drm_dbg_dp(ctrl->drm_dev, "voltage level: %d emphasis level: %d\n", voltage_swing_level, pre_emphasis_level); - ret = dp_ctrl_set_vx_px(ctrl, + ret = msm_dp_ctrl_set_vx_px(ctrl, voltage_swing_level, pre_emphasis_level); if (ret) @@ -1083,7 +1083,7 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl) return ret; } -static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl, +static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl, u8 pattern) { u8 buf; @@ -1100,7 +1100,7 @@ static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl, return ret == 1; } -static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_read_link_status(struct msm_dp_ctrl_private *ctrl, u8 *link_status) { int ret = 0, len; @@ -1114,24 +1114,24 @@ static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl, return ret; } -static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl, int *training_step) { int tries, old_v_level, ret = 0; u8 link_status[DP_LINK_STATUS_SIZE]; int const maximum_retries = 4; - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); *training_step = DP_TRAINING_1; - ret = 
dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1); + ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1); if (ret) return ret; - dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 | + msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE); - ret = dp_ctrl_update_vx_px(ctrl); + ret = msm_dp_ctrl_update_vx_px(ctrl); if (ret) return ret; @@ -1140,7 +1140,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, for (tries = 0; tries < maximum_retries; tries++) { drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd); - ret = dp_ctrl_read_link_status(ctrl, link_status); + ret = msm_dp_ctrl_read_link_status(ctrl, link_status); if (ret) return ret; @@ -1160,8 +1160,8 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, old_v_level = ctrl->link->phy_params.v_level; } - dp_link_adjust_levels(ctrl->link, link_status); - ret = dp_ctrl_update_vx_px(ctrl); + msm_dp_link_adjust_levels(ctrl->link, link_status); + ret = msm_dp_ctrl_update_vx_px(ctrl); if (ret) return ret; } @@ -1170,7 +1170,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, return -ETIMEDOUT; } -static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_link_rate_down_shift(struct msm_dp_ctrl_private *ctrl) { int ret = 0; @@ -1198,7 +1198,7 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl) return ret; } -static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_link_lane_down_shift(struct msm_dp_ctrl_private *ctrl) { if (ctrl->link->link_params.num_lanes == 1) @@ -1213,13 +1213,13 @@ static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl) return 0; } -static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl) +static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl) { - dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE); + msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE); drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd); } -static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl, int *training_step) { int tries = 0, ret = 0; @@ -1228,7 +1228,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, int const maximum_retries = 5; u8 link_status[DP_LINK_STATUS_SIZE]; - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); *training_step = DP_TRAINING_2; @@ -1243,16 +1243,16 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, state_ctrl_bit = 2; } - ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit); + ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit); if (ret) return ret; - dp_ctrl_train_pattern_set(ctrl, pattern); + msm_dp_ctrl_train_pattern_set(ctrl, pattern); for (tries = 0; tries <= maximum_retries; tries++) { drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd); - ret = dp_ctrl_read_link_status(ctrl, link_status); + ret = msm_dp_ctrl_read_link_status(ctrl, link_status); if (ret) return ret; @@ -1261,8 +1261,8 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, return 0; } - dp_link_adjust_levels(ctrl->link, link_status); - ret = dp_ctrl_update_vx_px(ctrl); + msm_dp_link_adjust_levels(ctrl->link, link_status); + ret = msm_dp_ctrl_update_vx_px(ctrl); if (ret) return ret; @@ -1271,24 +1271,24 @@ static int 
dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, return -ETIMEDOUT; } -static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl, int *training_step) { int ret = 0; const u8 *dpcd = ctrl->panel->dpcd; u8 encoding[] = { 0, DP_SET_ANSI_8B10B }; u8 assr; - struct dp_link_info link_info = {0}; + struct msm_dp_link_info link_info = {0}; - dp_ctrl_config_ctrl(ctrl); + msm_dp_ctrl_config_ctrl(ctrl); link_info.num_lanes = ctrl->link->link_params.num_lanes; link_info.rate = ctrl->link->link_params.rate; link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING; - dp_link_reset_phy_params_vx_px(ctrl->link); + msm_dp_link_reset_phy_params_vx_px(ctrl->link); - dp_aux_link_configure(ctrl->aux, &link_info); + msm_dp_aux_link_configure(ctrl->aux, &link_info); if (drm_dp_max_downspread(dpcd)) encoding[0] |= DP_SPREAD_AMP_0_5; @@ -1302,7 +1302,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, &assr, 1); } - ret = dp_ctrl_link_train_1(ctrl, training_step); + ret = msm_dp_ctrl_link_train_1(ctrl, training_step); if (ret) { DRM_ERROR("link training #1 failed. ret=%d\n", ret); goto end; @@ -1311,7 +1311,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, /* print success info as this is a result of user initiated action */ drm_dbg_dp(ctrl->drm_dev, "link training #1 successful\n"); - ret = dp_ctrl_link_train_2(ctrl, training_step); + ret = msm_dp_ctrl_link_train_2(ctrl, training_step); if (ret) { DRM_ERROR("link training #2 failed. ret=%d\n", ret); goto end; @@ -1321,17 +1321,17 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, drm_dbg_dp(ctrl->drm_dev, "link training #2 successful\n"); end: - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); return ret; } -static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_setup_main_link(struct msm_dp_ctrl_private *ctrl, int *training_step) { int ret = 0; - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) return ret; @@ -1342,17 +1342,17 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, * a link training pattern, we have to first do soft reset. */ - ret = dp_ctrl_link_train(ctrl, training_step); + ret = msm_dp_ctrl_link_train(ctrl, training_step); return ret; } -int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl) +int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; int ret = 0; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); if (ctrl->core_clks_on) { drm_dbg_dp(ctrl->drm_dev, "core clks already enabled\n"); @@ -1374,11 +1374,11 @@ int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl) return 0; } -void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); clk_bulk_disable_unprepare(ctrl->num_core_clks, ctrl->core_clks); @@ -1391,12 +1391,12 @@ void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl) ctrl->core_clks_on ? 
"on" : "off"); } -static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl) +static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; int ret = 0; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); if (ctrl->link_clks_on) { drm_dbg_dp(ctrl->drm_dev, "links clks already enabled\n"); @@ -1406,7 +1406,7 @@ static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl) if (!ctrl->core_clks_on) { drm_dbg_dp(ctrl->drm_dev, "Enable core clks before link clks\n"); - dp_ctrl_core_clk_enable(dp_ctrl); + msm_dp_ctrl_core_clk_enable(msm_dp_ctrl); } ret = clk_bulk_prepare_enable(ctrl->num_link_clks, ctrl->link_clks); @@ -1424,11 +1424,11 @@ static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl) return 0; } -static void dp_ctrl_link_clk_disable(struct dp_ctrl *dp_ctrl) +static void msm_dp_ctrl_link_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); clk_bulk_disable_unprepare(ctrl->num_link_clks, ctrl->link_clks); @@ -1441,7 +1441,7 @@ static void dp_ctrl_link_clk_disable(struct dp_ctrl *dp_ctrl) ctrl->core_clks_on ? "on" : "off"); } -static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_enable_mainlink_clocks(struct msm_dp_ctrl_private *ctrl) { int ret = 0; struct phy *phy = ctrl->phy; @@ -1455,7 +1455,7 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) phy_power_on(phy); dev_pm_opp_set_rate(ctrl->dev, ctrl->link->link_params.rate * 1000); - ret = dp_ctrl_link_clk_enable(&ctrl->dp_ctrl); + ret = msm_dp_ctrl_link_clk_enable(&ctrl->msm_dp_ctrl); if (ret) DRM_ERROR("Unable to start link clocks. 
ret=%d\n", ret); @@ -1464,13 +1464,13 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) return ret; } -void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable) +void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); - dp_catalog_ctrl_reset(ctrl->catalog); + msm_dp_catalog_ctrl_reset(ctrl->catalog); /* * all dp controller programmable registers will not @@ -1478,28 +1478,28 @@ void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable) * therefore interrupt mask bits have to be updated * to enable/disable interrupts */ - dp_catalog_ctrl_enable_irq(ctrl->catalog, enable); + msm_dp_catalog_ctrl_enable_irq(ctrl->catalog, enable); } -void dp_ctrl_config_psr(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl) { u8 cfg; - struct dp_ctrl_private *ctrl = container_of(dp_ctrl, - struct dp_ctrl_private, dp_ctrl); + struct msm_dp_ctrl_private *ctrl = container_of(msm_dp_ctrl, + struct msm_dp_ctrl_private, msm_dp_ctrl); if (!ctrl->panel->psr_cap.version) return; - dp_catalog_ctrl_config_psr(ctrl->catalog); + msm_dp_catalog_ctrl_config_psr(ctrl->catalog); cfg = DP_PSR_ENABLE; drm_dp_dpcd_write(ctrl->aux, DP_PSR_EN_CFG, &cfg, 1); } -void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enter) +void msm_dp_ctrl_set_psr(struct msm_dp_ctrl *msm_dp_ctrl, bool enter) { - struct dp_ctrl_private *ctrl = container_of(dp_ctrl, - struct dp_ctrl_private, dp_ctrl); + struct msm_dp_ctrl_private *ctrl = container_of(msm_dp_ctrl, + struct msm_dp_ctrl_private, msm_dp_ctrl); if (!ctrl->panel->psr_cap.version) return; @@ -1516,64 +1516,64 @@ void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enter) */ if (enter) { reinit_completion(&ctrl->psr_op_comp); - dp_catalog_ctrl_set_psr(ctrl->catalog, true); + msm_dp_catalog_ctrl_set_psr(ctrl->catalog, true); if (!wait_for_completion_timeout(&ctrl->psr_op_comp, PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES)) { DRM_ERROR("PSR_ENTRY timedout\n"); - dp_catalog_ctrl_set_psr(ctrl->catalog, false); + msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false); return; } - dp_ctrl_push_idle(dp_ctrl); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_ctrl_push_idle(msm_dp_ctrl); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); - dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false); + msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false); } else { - dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true); + msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true); - dp_catalog_ctrl_set_psr(ctrl->catalog, false); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); - dp_ctrl_wait4video_ready(ctrl); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + msm_dp_ctrl_wait4video_ready(ctrl); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); } } -void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_ctrl_phy_reset(ctrl->catalog); + 
msm_dp_catalog_ctrl_phy_reset(ctrl->catalog); phy_init(phy); drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", phy, phy->init_count, phy->power_count); } -void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_ctrl_phy_reset(ctrl->catalog); + msm_dp_catalog_ctrl_phy_reset(ctrl->catalog); phy_exit(phy); drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", phy, phy->init_count, phy->power_count); } -static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_reinitialize_mainlink(struct msm_dp_ctrl_private *ctrl) { struct phy *phy = ctrl->phy; int ret = 0; - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); ctrl->phy_opts.dp.lanes = ctrl->link->link_params.num_lanes; phy_configure(phy, &ctrl->phy_opts); /* @@ -1583,13 +1583,13 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl) */ dev_pm_opp_set_rate(ctrl->dev, 0); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); phy_power_off(phy); /* hw recommended delay before re-enabling clocks */ msleep(20); - ret = dp_ctrl_enable_mainlink_clocks(ctrl); + ret = msm_dp_ctrl_enable_mainlink_clocks(ctrl); if (ret) { DRM_ERROR("Failed to enable mainlink clks. ret=%d\n", ret); return ret; @@ -1598,18 +1598,18 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl) return ret; } -static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_deinitialize_mainlink(struct msm_dp_ctrl_private *ctrl) { struct phy *phy; phy = ctrl->phy; - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); - dp_catalog_ctrl_reset(ctrl->catalog); + msm_dp_catalog_ctrl_reset(ctrl->catalog); dev_pm_opp_set_rate(ctrl->dev, 0); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); phy_power_off(phy); @@ -1622,30 +1622,30 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl) return 0; } -static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_link_maintenance(struct msm_dp_ctrl_private *ctrl) { int ret = 0; int training_step = DP_TRAINING_NONE; - dp_ctrl_push_idle(&ctrl->dp_ctrl); + msm_dp_ctrl_push_idle(&ctrl->msm_dp_ctrl); ctrl->link->phy_params.p_level = 0; ctrl->link->phy_params.v_level = 0; - ret = dp_ctrl_setup_main_link(ctrl, &training_step); + ret = msm_dp_ctrl_setup_main_link(ctrl, &training_step); if (ret) goto end; - dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); - ret = dp_ctrl_wait4video_ready(ctrl); + ret = msm_dp_ctrl_wait4video_ready(ctrl); end: return ret; } -static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) +static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl) { bool success = false; u32 pattern_sent = 0x0; @@ -1653,17 +1653,17 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) drm_dbg_dp(ctrl->drm_dev, "request: 0x%x\n", 
pattern_requested); - if (dp_ctrl_set_vx_px(ctrl, + if (msm_dp_ctrl_set_vx_px(ctrl, ctrl->link->phy_params.v_level, ctrl->link->phy_params.p_level)) { DRM_ERROR("Failed to set v/p levels\n"); return false; } - dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested); - dp_ctrl_update_vx_px(ctrl); - dp_link_send_test_response(ctrl->link); + msm_dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested); + msm_dp_ctrl_update_vx_px(ctrl); + msm_dp_link_send_test_response(ctrl->link); - pattern_sent = dp_catalog_ctrl_read_phy_pattern(ctrl->catalog); + pattern_sent = msm_dp_catalog_ctrl_read_phy_pattern(ctrl->catalog); switch (pattern_sent) { case MR_LINK_TRAINING1: @@ -1697,7 +1697,7 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) return success; } -static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_process_phy_test_request(struct msm_dp_ctrl_private *ctrl) { int ret; unsigned long pixel_rate; @@ -1713,15 +1713,15 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) * running. Add the global reset just before disabling the * link clocks and core clocks. */ - dp_ctrl_off(&ctrl->dp_ctrl); + msm_dp_ctrl_off(&ctrl->msm_dp_ctrl); - ret = dp_ctrl_on_link(&ctrl->dp_ctrl); + ret = msm_dp_ctrl_on_link(&ctrl->msm_dp_ctrl); if (ret) { DRM_ERROR("failed to enable DP link controller\n"); return ret; } - pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + pixel_rate = ctrl->panel->msm_dp_mode.drm_mode.clock; ret = clk_set_rate(ctrl->pixel_clk, pixel_rate * 1000); if (ret) { DRM_ERROR("Failed to set pixel clock rate. ret=%d\n", ret); @@ -1739,49 +1739,49 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) ctrl->stream_clks_on = true; } - dp_ctrl_send_phy_test_pattern(ctrl); + msm_dp_ctrl_send_phy_test_pattern(ctrl); return 0; } -void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_handle_sink_request(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; u32 sink_request = 0x0; - if (!dp_ctrl) { + if (!msm_dp_ctrl) { DRM_ERROR("invalid input\n"); return; } - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); sink_request = ctrl->link->sink_request; if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { drm_dbg_dp(ctrl->drm_dev, "PHY_TEST_PATTERN request\n"); - if (dp_ctrl_process_phy_test_request(ctrl)) { + if (msm_dp_ctrl_process_phy_test_request(ctrl)) { DRM_ERROR("process phy_test_req failed\n"); return; } } if (sink_request & DP_LINK_STATUS_UPDATED) { - if (dp_ctrl_link_maintenance(ctrl)) { + if (msm_dp_ctrl_link_maintenance(ctrl)) { DRM_ERROR("LM failed: TEST_LINK_TRAINING\n"); return; } } if (sink_request & DP_TEST_LINK_TRAINING) { - dp_link_send_test_response(ctrl->link); - if (dp_ctrl_link_maintenance(ctrl)) { + msm_dp_link_send_test_response(ctrl->link); + if (msm_dp_ctrl_link_maintenance(ctrl)) { DRM_ERROR("LM failed: TEST_LINK_TRAINING\n"); return; } } } -static bool dp_ctrl_clock_recovery_any_ok( +static bool msm_dp_ctrl_clock_recovery_any_ok( const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count) { @@ -1800,20 +1800,20 @@ static bool dp_ctrl_clock_recovery_any_ok( return drm_dp_clock_recovery_ok(link_status, reduced_cnt); } -static bool dp_ctrl_channel_eq_ok(struct dp_ctrl_private *ctrl) +static bool msm_dp_ctrl_channel_eq_ok(struct msm_dp_ctrl_private *ctrl) { u8 link_status[DP_LINK_STATUS_SIZE]; 
int num_lanes = ctrl->link->link_params.num_lanes; - dp_ctrl_read_link_status(ctrl, link_status); + msm_dp_ctrl_read_link_status(ctrl, link_status); return drm_dp_channel_eq_ok(link_status, num_lanes); } -int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) +int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl) { int rc = 0; - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; u32 rate; int link_train_max_retries = 5; u32 const phy_cts_pixel_clk_khz = 148500; @@ -1821,15 +1821,15 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) unsigned int training_step; unsigned long pixel_rate; - if (!dp_ctrl) + if (!msm_dp_ctrl) return -EINVAL; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); rate = ctrl->panel->link_info.rate; - pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + pixel_rate = ctrl->panel->msm_dp_mode.drm_mode.clock; - dp_ctrl_core_clk_enable(&ctrl->dp_ctrl); + msm_dp_ctrl_core_clk_enable(&ctrl->msm_dp_ctrl); if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { drm_dbg_dp(ctrl->drm_dev, @@ -1840,7 +1840,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) ctrl->link->link_params.rate = rate; ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes; - if (ctrl->panel->dp_mode.out_fmt_is_yuv_420) + if (ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420) pixel_rate >>= 1; } @@ -1848,32 +1848,32 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) ctrl->link->link_params.rate, ctrl->link->link_params.num_lanes, pixel_rate); - rc = dp_ctrl_enable_mainlink_clocks(ctrl); + rc = msm_dp_ctrl_enable_mainlink_clocks(ctrl); if (rc) return rc; while (--link_train_max_retries) { training_step = DP_TRAINING_NONE; - rc = dp_ctrl_setup_main_link(ctrl, &training_step); + rc = msm_dp_ctrl_setup_main_link(ctrl, &training_step); if (rc == 0) { /* training completed successfully */ break; } else if (training_step == DP_TRAINING_1) { /* link train_1 failed */ - if (!dp_catalog_link_is_connected(ctrl->catalog)) + if (!msm_dp_catalog_link_is_connected(ctrl->catalog)) break; - dp_ctrl_read_link_status(ctrl, link_status); + msm_dp_ctrl_read_link_status(ctrl, link_status); - rc = dp_ctrl_link_rate_down_shift(ctrl); + rc = msm_dp_ctrl_link_rate_down_shift(ctrl); if (rc < 0) { /* already in RBR = 1.6G */ - if (dp_ctrl_clock_recovery_any_ok(link_status, + if (msm_dp_ctrl_clock_recovery_any_ok(link_status, ctrl->link->link_params.num_lanes)) { /* * some lanes are ready, * reduce lane number */ - rc = dp_ctrl_link_lane_down_shift(ctrl); + rc = msm_dp_ctrl_link_lane_down_shift(ctrl); if (rc < 0) { /* lane == 1 already */ /* end with failure */ break; @@ -1885,16 +1885,16 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) } } else if (training_step == DP_TRAINING_2) { /* link train_2 failed */ - if (!dp_catalog_link_is_connected(ctrl->catalog)) + if (!msm_dp_catalog_link_is_connected(ctrl->catalog)) break; - dp_ctrl_read_link_status(ctrl, link_status); + msm_dp_ctrl_read_link_status(ctrl, link_status); if (!drm_dp_clock_recovery_ok(link_status, ctrl->link->link_params.num_lanes)) - rc = dp_ctrl_link_rate_down_shift(ctrl); + rc = msm_dp_ctrl_link_rate_down_shift(ctrl); else - rc = dp_ctrl_link_lane_down_shift(ctrl); + rc = msm_dp_ctrl_link_lane_down_shift(ctrl); if (rc < 0) { /* end with failure */ @@ -1902,10 +1902,10 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) } /* stop link training before start re training */ - dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl); } - rc = 
dp_ctrl_reinitialize_mainlink(ctrl); + rc = msm_dp_ctrl_reinitialize_mainlink(ctrl); if (rc) { DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc); break; @@ -1926,38 +1926,38 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) * link training failed * end txing train pattern here */ - dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl); - dp_ctrl_deinitialize_mainlink(ctrl); + msm_dp_ctrl_deinitialize_mainlink(ctrl); rc = -ECONNRESET; } return rc; } -static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_link_retrain(struct msm_dp_ctrl_private *ctrl) { int training_step = DP_TRAINING_NONE; - return dp_ctrl_setup_main_link(ctrl, &training_step); + return msm_dp_ctrl_setup_main_link(ctrl, &training_step); } -int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) +int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train) { int ret = 0; bool mainlink_ready = false; - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; unsigned long pixel_rate; unsigned long pixel_rate_orig; - if (!dp_ctrl) + if (!msm_dp_ctrl) return -EINVAL; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); - pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock; + pixel_rate = pixel_rate_orig = ctrl->panel->msm_dp_mode.drm_mode.clock; - if (dp_ctrl->wide_bus_en || ctrl->panel->dp_mode.out_fmt_is_yuv_420) + if (msm_dp_ctrl->wide_bus_en || ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420) pixel_rate >>= 1; drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n", @@ -1969,7 +1969,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) ctrl->core_clks_on, ctrl->link_clks_on, ctrl->stream_clks_on); if (!ctrl->link_clks_on) { /* link clk is off */ - ret = dp_ctrl_enable_mainlink_clocks(ctrl); + ret = msm_dp_ctrl_enable_mainlink_clocks(ctrl); if (ret) { DRM_ERROR("Failed to start link clocks. ret=%d\n", ret); goto end; @@ -1993,11 +1993,11 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) ctrl->stream_clks_on = true; } - if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl)) - dp_ctrl_link_retrain(ctrl); + if (force_link_train || !msm_dp_ctrl_channel_eq_ok(ctrl)) + msm_dp_ctrl_link_retrain(ctrl); /* stop txing train pattern to end link training */ - dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl); /* * Set up transfer unit values and set controller state to send @@ -2005,22 +2005,22 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) */ reinit_completion(&ctrl->video_comp); - dp_ctrl_configure_source_params(ctrl); + msm_dp_ctrl_configure_source_params(ctrl); - dp_catalog_ctrl_config_msa(ctrl->catalog, + msm_dp_catalog_ctrl_config_msa(ctrl->catalog, ctrl->link->link_params.rate, pixel_rate_orig, - ctrl->panel->dp_mode.out_fmt_is_yuv_420); + ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420); - dp_ctrl_setup_tr_unit(ctrl); + msm_dp_ctrl_setup_tr_unit(ctrl); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); - ret = dp_ctrl_wait4video_ready(ctrl); + ret = msm_dp_ctrl_wait4video_ready(ctrl); if (ret) return ret; - mainlink_ready = dp_catalog_ctrl_mainlink_ready(ctrl->catalog); + mainlink_ready = msm_dp_catalog_ctrl_mainlink_ready(ctrl->catalog); drm_dbg_dp(ctrl->drm_dev, "mainlink %s\n", mainlink_ready ? 
"READY" : "NOT READY"); @@ -2028,20 +2028,20 @@ end: return ret; } -void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_off_link_stream(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_panel_disable_vsc_sdp(ctrl->catalog); + msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog); /* set dongle to D3 (power off) mode */ - dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true); + msm_dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true); - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); if (ctrl->stream_clks_on) { clk_disable_unprepare(ctrl->pixel_clk); @@ -2049,7 +2049,7 @@ void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl) } dev_pm_opp_set_rate(ctrl->dev, 0); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); phy_power_off(phy); @@ -2061,17 +2061,17 @@ void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl) phy, phy->init_count, phy->power_count); } -void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n", phy, phy->init_count, phy->power_count); @@ -2082,19 +2082,19 @@ void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl) phy, phy->init_count, phy->power_count); } -void dp_ctrl_off(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_panel_disable_vsc_sdp(ctrl->catalog); + msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog); - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); - dp_catalog_ctrl_reset(ctrl->catalog); + msm_dp_catalog_ctrl_reset(ctrl->catalog); if (ctrl->stream_clks_on) { clk_disable_unprepare(ctrl->pixel_clk); @@ -2102,26 +2102,26 @@ void dp_ctrl_off(struct dp_ctrl *dp_ctrl) } dev_pm_opp_set_rate(ctrl->dev, 0); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); phy_power_off(phy); drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", phy, phy->init_count, phy->power_count); } -irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl) +irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; u32 isr; irqreturn_t ret = IRQ_NONE; - if (!dp_ctrl) + if (!msm_dp_ctrl) return IRQ_NONE; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); if (ctrl->panel->psr_cap.version) { - isr = 
dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog); + isr = msm_dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog); if (isr) complete(&ctrl->psr_op_comp); @@ -2136,7 +2136,7 @@ irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl) drm_dbg_dp(ctrl->drm_dev, "PSR frame capture done\n"); } - isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog); + isr = msm_dp_catalog_ctrl_get_interrupt(ctrl->catalog); if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) { @@ -2164,13 +2164,13 @@ static const char *ctrl_clks[] = { "ctrl_link_iface", }; -static int dp_ctrl_clk_init(struct dp_ctrl *dp_ctrl) +static int msm_dp_ctrl_clk_init(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct device *dev; int i, rc; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); dev = ctrl->dev; ctrl->num_core_clks = ARRAY_SIZE(core_clks); @@ -2204,12 +2204,12 @@ static int dp_ctrl_clk_init(struct dp_ctrl *dp_ctrl) return 0; } -struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, - struct dp_panel *panel, struct drm_dp_aux *aux, - struct dp_catalog *catalog, +struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link, + struct msm_dp_panel *panel, struct drm_dp_aux *aux, + struct msm_dp_catalog *catalog, struct phy *phy) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; int ret; if (!dev || !panel || !aux || @@ -2228,7 +2228,7 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, if (ret) { dev_err(dev, "invalid DP OPP table in device tree\n"); /* caller do PTR_ERR(opp_table) */ - return (struct dp_ctrl *)ERR_PTR(ret); + return (struct msm_dp_ctrl *)ERR_PTR(ret); } /* OPP table is optional */ @@ -2248,11 +2248,11 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, ctrl->dev = dev; ctrl->phy = phy; - ret = dp_ctrl_clk_init(&ctrl->dp_ctrl); + ret = msm_dp_ctrl_clk_init(&ctrl->msm_dp_ctrl); if (ret) { dev_err(dev, "failed to init clocks\n"); return ERR_PTR(ret); } - return &ctrl->dp_ctrl; + return &ctrl->msm_dp_ctrl; } diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h index ffcbd9a25748..b7abfedbf574 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.h +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -11,34 +11,34 @@ #include "dp_link.h" #include "dp_catalog.h" -struct dp_ctrl { +struct msm_dp_ctrl { bool wide_bus_en; }; struct phy; -int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl); -int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train); -void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl); -void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl); -void dp_ctrl_off(struct dp_ctrl *dp_ctrl); -void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl); -irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl); -void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl); -struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, - struct dp_panel *panel, struct drm_dp_aux *aux, - struct dp_catalog *catalog, +int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl); +int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train); +void msm_dp_ctrl_off_link_stream(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl); +irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl); +void 
msm_dp_ctrl_handle_sink_request(struct msm_dp_ctrl *msm_dp_ctrl); +struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link, + struct msm_dp_panel *panel, struct drm_dp_aux *aux, + struct msm_dp_catalog *catalog, struct phy *phy); -void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable); -void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl); -void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl); -void dp_ctrl_irq_phy_exit(struct dp_ctrl *dp_ctrl); +void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable); +void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_irq_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl); -void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enable); -void dp_ctrl_config_psr(struct dp_ctrl *dp_ctrl); +void msm_dp_ctrl_set_psr(struct msm_dp_ctrl *msm_dp_ctrl, bool enable); +void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl); -int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl); -void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl); +int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl); #endif /* _DP_CTRL_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c index b8611f6d2296..22fd946ee201 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.c +++ b/drivers/gpu/drm/msm/dp/dp_debug.c @@ -17,15 +17,15 @@ #define DEBUG_NAME "msm_dp" -struct dp_debug_private { - struct dp_link *link; - struct dp_panel *panel; +struct msm_dp_debug_private { + struct msm_dp_link *link; + struct msm_dp_panel *panel; struct drm_connector *connector; }; -static int dp_debug_show(struct seq_file *seq, void *p) +static int msm_dp_debug_show(struct seq_file *seq, void *p) { - struct dp_debug_private *debug = seq->private; + struct msm_dp_debug_private *debug = seq->private; u64 lclk = 0; u32 link_params_rate; const struct drm_display_mode *drm_mode; @@ -33,7 +33,7 @@ static int dp_debug_show(struct seq_file *seq, void *p) if (!debug) return -ENODEV; - drm_mode = &debug->panel->dp_mode.drm_mode; + drm_mode = &debug->panel->msm_dp_mode.drm_mode; seq_printf(seq, "\tname = %s\n", DEBUG_NAME); seq_printf(seq, "\tdrm_dp_link\n\t\trate = %u\n", @@ -55,8 +55,8 @@ static int dp_debug_show(struct seq_file *seq, void *p) drm_mode->hsync_end - drm_mode->hsync_start, drm_mode->vsync_end - drm_mode->vsync_start); seq_printf(seq, "\t\tactive_low = %dx%d\n", - debug->panel->dp_mode.h_active_low, - debug->panel->dp_mode.v_active_low); + debug->panel->msm_dp_mode.h_active_low, + debug->panel->msm_dp_mode.v_active_low); seq_printf(seq, "\t\th_skew = %d\n", drm_mode->hskew); seq_printf(seq, "\t\trefresh rate = %d\n", @@ -64,7 +64,7 @@ static int dp_debug_show(struct seq_file *seq, void *p) seq_printf(seq, "\t\tpixel clock khz = %d\n", drm_mode->clock); seq_printf(seq, "\t\tbpp = %d\n", - debug->panel->dp_mode.bpp); + debug->panel->msm_dp_mode.bpp); /* Link Information */ seq_printf(seq, "\tdp_link:\n\t\ttest_requested = %d\n", @@ -83,11 +83,11 @@ static int dp_debug_show(struct seq_file *seq, void *p) return 0; } -DEFINE_SHOW_ATTRIBUTE(dp_debug); +DEFINE_SHOW_ATTRIBUTE(msm_dp_debug); -static int dp_test_data_show(struct seq_file *m, void *data) +static int msm_dp_test_data_show(struct seq_file *m, void *data) { - const struct dp_debug_private *debug = m->private; + const struct msm_dp_debug_private *debug = m->private; const struct drm_connector *connector = debug->connector; u32 
bpc; @@ -98,18 +98,18 @@ static int dp_test_data_show(struct seq_file *m, void *data) seq_printf(m, "vdisplay: %d\n", debug->link->test_video.test_v_height); seq_printf(m, "bpc: %u\n", - dp_link_bit_depth_to_bpp(bpc) / 3); + msm_dp_link_bit_depth_to_bpp(bpc) / 3); } else { seq_puts(m, "0"); } return 0; } -DEFINE_SHOW_ATTRIBUTE(dp_test_data); +DEFINE_SHOW_ATTRIBUTE(msm_dp_test_data); -static int dp_test_type_show(struct seq_file *m, void *data) +static int msm_dp_test_type_show(struct seq_file *m, void *data) { - const struct dp_debug_private *debug = m->private; + const struct msm_dp_debug_private *debug = m->private; const struct drm_connector *connector = debug->connector; if (connector->status == connector_status_connected) @@ -119,15 +119,15 @@ static int dp_test_type_show(struct seq_file *m, void *data) return 0; } -DEFINE_SHOW_ATTRIBUTE(dp_test_type); +DEFINE_SHOW_ATTRIBUTE(msm_dp_test_type); -static ssize_t dp_test_active_write(struct file *file, +static ssize_t msm_dp_test_active_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { char *input_buffer; int status = 0; - const struct dp_debug_private *debug; + const struct msm_dp_debug_private *debug; const struct drm_connector *connector; int val = 0; @@ -164,9 +164,9 @@ static ssize_t dp_test_active_write(struct file *file, return len; } -static int dp_test_active_show(struct seq_file *m, void *data) +static int msm_dp_test_active_show(struct seq_file *m, void *data) { - struct dp_debug_private *debug = m->private; + struct msm_dp_debug_private *debug = m->private; struct drm_connector *connector = debug->connector; if (connector->status == connector_status_connected) { @@ -181,28 +181,28 @@ static int dp_test_active_show(struct seq_file *m, void *data) return 0; } -static int dp_test_active_open(struct inode *inode, +static int msm_dp_test_active_open(struct inode *inode, struct file *file) { - return single_open(file, dp_test_active_show, + return single_open(file, msm_dp_test_active_show, inode->i_private); } static const struct file_operations test_active_fops = { .owner = THIS_MODULE, - .open = dp_test_active_open, + .open = msm_dp_test_active_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, - .write = dp_test_active_write + .write = msm_dp_test_active_write }; -int dp_debug_init(struct device *dev, struct dp_panel *panel, - struct dp_link *link, +int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel, + struct msm_dp_link *link, struct drm_connector *connector, struct dentry *root, bool is_edp) { - struct dp_debug_private *debug; + struct msm_dp_debug_private *debug; if (!dev || !panel || !link) { DRM_ERROR("invalid input\n"); @@ -217,20 +217,20 @@ int dp_debug_init(struct device *dev, struct dp_panel *panel, debug->panel = panel; debugfs_create_file("dp_debug", 0444, root, - debug, &dp_debug_fops); + debug, &msm_dp_debug_fops); if (!is_edp) { - debugfs_create_file("msm_dp_test_active", 0444, + debugfs_create_file("dp_test_active", 0444, root, debug, &test_active_fops); - debugfs_create_file("msm_dp_test_data", 0444, + debugfs_create_file("dp_test_data", 0444, root, - debug, &dp_test_data_fops); + debug, &msm_dp_test_data_fops); - debugfs_create_file("msm_dp_test_type", 0444, + debugfs_create_file("dp_test_type", 0444, root, - debug, &dp_test_type_fops); + debug, &msm_dp_test_type_fops); } return 0; diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h index 7e1aa892fc09..6dc0ff4f0f65 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.h +++ 
b/drivers/gpu/drm/msm/dp/dp_debug.h @@ -12,7 +12,7 @@ #if defined(CONFIG_DEBUG_FS) /** - * dp_debug_get() - configure and get the DisplayPlot debug module data + * msm_dp_debug_init() - configure and get the DisplayPort debug module data * * @dev: device instance of the caller * @panel: instance of panel module @@ -25,8 +25,8 @@ * This function sets up the debug module and provides a way * for debugfs input to be communicated with existing modules */ -int dp_debug_init(struct device *dev, struct dp_panel *panel, - struct dp_link *link, +int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel, + struct msm_dp_link *link, struct drm_connector *connector, struct dentry *root, bool is_edp); @@ -34,8 +34,8 @@ int dp_debug_init(struct device *dev, struct dp_panel *panel, #else static inline -int dp_debug_init(struct device *dev, struct dp_panel *panel, - struct dp_link *link, +int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel, + struct msm_dp_link *link, struct drm_connector *connector, struct dentry *root, bool is_edp) diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index e1228fb093ee..aff51bb973eb 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -67,13 +67,13 @@ enum { #define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2) -struct dp_event { +struct msm_dp_event { u32 event_id; u32 data; u32 delay; }; -struct dp_display_private { +struct msm_dp_display_private { int irq; unsigned int id; @@ -85,14 +85,14 @@ struct dp_display_private { struct drm_device *drm_dev; - struct dp_catalog *catalog; + struct msm_dp_catalog *catalog; struct drm_dp_aux *aux; - struct dp_link *link; - struct dp_panel *panel; - struct dp_ctrl *ctrl; + struct msm_dp_link *link; + struct msm_dp_panel *panel; + struct msm_dp_ctrl *ctrl; - struct dp_display_mode dp_mode; - struct msm_dp dp_display; + struct msm_dp_display_mode msm_dp_mode; + struct msm_dp msm_dp_display; /* wait for audio signaling */ struct completion audio_comp; @@ -104,12 +104,12 @@ struct dp_display_private { u32 event_pndx; u32 event_gndx; struct task_struct *ev_tsk; - struct dp_event event_list[DP_EVENT_Q_MAX]; + struct msm_dp_event event_list[DP_EVENT_Q_MAX]; spinlock_t event_lock; bool wide_bus_supported; - struct dp_audio *audio; + struct msm_dp_audio *audio; }; struct msm_dp_desc { @@ -118,25 +118,33 @@ struct msm_dp_desc { bool wide_bus_supported; }; -static const struct msm_dp_desc sc7180_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sa8775p[] = { + { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, + { .io_start = 0x0af5c000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, + { .io_start = 0x22154000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, + { .io_start = 0x2215c000, .id = MSM_DP_CONTROLLER_3, .wide_bus_supported = true }, + {} +}; + +static const struct msm_dp_desc msm_dp_desc_sc7180[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, {} }; -static const struct msm_dp_desc sc7280_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sc7280[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, {} }; -static const struct msm_dp_desc sc8180x_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sc8180x[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, { .io_start =
0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, {} }; -static const struct msm_dp_desc sc8280xp_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sc8280xp[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, @@ -148,12 +156,12 @@ static const struct msm_dp_desc sc8280xp_dp_descs[] = { {} }; -static const struct msm_dp_desc sm8650_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sm8650[] = { { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, {} }; -static const struct msm_dp_desc x1e80100_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_x1e80100[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, @@ -161,70 +169,71 @@ static const struct msm_dp_desc x1e80100_dp_descs[] = { {} }; -static const struct of_device_id dp_dt_match[] = { - { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_descs }, - { .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_descs }, - { .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_descs }, - { .compatible = "qcom,sc8180x-dp", .data = &sc8180x_dp_descs }, - { .compatible = "qcom,sc8180x-edp", .data = &sc8180x_dp_descs }, - { .compatible = "qcom,sc8280xp-dp", .data = &sc8280xp_dp_descs }, - { .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_dp_descs }, - { .compatible = "qcom,sdm845-dp", .data = &sc7180_dp_descs }, - { .compatible = "qcom,sm8350-dp", .data = &sc7180_dp_descs }, - { .compatible = "qcom,sm8650-dp", .data = &sm8650_dp_descs }, - { .compatible = "qcom,x1e80100-dp", .data = &x1e80100_dp_descs }, +static const struct of_device_id msm_dp_dt_match[] = { + { .compatible = "qcom,sa8775p-dp", .data = &msm_dp_desc_sa8775p }, + { .compatible = "qcom,sc7180-dp", .data = &msm_dp_desc_sc7180 }, + { .compatible = "qcom,sc7280-dp", .data = &msm_dp_desc_sc7280 }, + { .compatible = "qcom,sc7280-edp", .data = &msm_dp_desc_sc7280 }, + { .compatible = "qcom,sc8180x-dp", .data = &msm_dp_desc_sc8180x }, + { .compatible = "qcom,sc8180x-edp", .data = &msm_dp_desc_sc8180x }, + { .compatible = "qcom,sc8280xp-dp", .data = &msm_dp_desc_sc8280xp }, + { .compatible = "qcom,sc8280xp-edp", .data = &msm_dp_desc_sc8280xp }, + { .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sc7180 }, + { .compatible = "qcom,sm8350-dp", .data = &msm_dp_desc_sc7180 }, + { .compatible = "qcom,sm8650-dp", .data = &msm_dp_desc_sm8650 }, + { .compatible = "qcom,x1e80100-dp", .data = &msm_dp_desc_x1e80100 }, {} }; -static struct dp_display_private *dev_get_dp_display_private(struct device *dev) +static struct msm_dp_display_private *dev_get_dp_display_private(struct device *dev) { struct msm_dp *dp = dev_get_drvdata(dev); - return container_of(dp, struct dp_display_private, dp_display); + return container_of(dp, struct msm_dp_display_private, msm_dp_display); } -static int dp_add_event(struct dp_display_private *dp_priv, u32 event, +static int msm_dp_add_event(struct msm_dp_display_private *msm_dp_priv, u32 event, u32 data, u32 delay) { unsigned long flag; - struct dp_event *todo; + struct msm_dp_event *todo; int pndx; - 
spin_lock_irqsave(&dp_priv->event_lock, flag); - pndx = dp_priv->event_pndx + 1; + spin_lock_irqsave(&msm_dp_priv->event_lock, flag); + pndx = msm_dp_priv->event_pndx + 1; pndx %= DP_EVENT_Q_MAX; - if (pndx == dp_priv->event_gndx) { + if (pndx == msm_dp_priv->event_gndx) { pr_err("event_q is full: pndx=%d gndx=%d\n", - dp_priv->event_pndx, dp_priv->event_gndx); - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + msm_dp_priv->event_pndx, msm_dp_priv->event_gndx); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); return -EPERM; } - todo = &dp_priv->event_list[dp_priv->event_pndx++]; - dp_priv->event_pndx %= DP_EVENT_Q_MAX; + todo = &msm_dp_priv->event_list[msm_dp_priv->event_pndx++]; + msm_dp_priv->event_pndx %= DP_EVENT_Q_MAX; todo->event_id = event; todo->data = data; todo->delay = delay; - wake_up(&dp_priv->event_q); - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + wake_up(&msm_dp_priv->event_q); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); return 0; } -static int dp_del_event(struct dp_display_private *dp_priv, u32 event) +static int msm_dp_del_event(struct msm_dp_display_private *msm_dp_priv, u32 event) { unsigned long flag; - struct dp_event *todo; + struct msm_dp_event *todo; u32 gndx; - spin_lock_irqsave(&dp_priv->event_lock, flag); - if (dp_priv->event_pndx == dp_priv->event_gndx) { - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + spin_lock_irqsave(&msm_dp_priv->event_lock, flag); + if (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) { + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); return -ENOENT; } - gndx = dp_priv->event_gndx; - while (dp_priv->event_pndx != gndx) { - todo = &dp_priv->event_list[gndx]; + gndx = msm_dp_priv->event_gndx; + while (msm_dp_priv->event_pndx != gndx) { + todo = &msm_dp_priv->event_list[gndx]; if (todo->event_id == event) { todo->event_id = EV_NO_EVENT; /* deleted */ todo->delay = 0; @@ -232,60 +241,60 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event) gndx++; gndx %= DP_EVENT_Q_MAX; } - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); return 0; } -void dp_display_signal_audio_start(struct msm_dp *dp_display) +void msm_dp_display_signal_audio_start(struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); reinit_completion(&dp->audio_comp); } -void dp_display_signal_audio_complete(struct msm_dp *dp_display) +void msm_dp_display_signal_audio_complete(struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); complete_all(&dp->audio_comp); } -static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv); +static int msm_dp_hpd_event_thread_start(struct msm_dp_display_private *msm_dp_priv); -static int dp_display_bind(struct device *dev, struct device *master, +static int msm_dp_display_bind(struct device *dev, struct device *master, void *data) { int rc = 0; - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); struct msm_drm_private *priv = dev_get_drvdata(master); struct drm_device *drm = priv->dev; - dp->dp_display.drm_dev = drm; - 
priv->dp[dp->id] = &dp->dp_display; + dp->msm_dp_display.drm_dev = drm; + priv->dp[dp->id] = &dp->msm_dp_display; dp->drm_dev = drm; dp->aux->drm_dev = drm; - rc = dp_aux_register(dp->aux); + rc = msm_dp_aux_register(dp->aux); if (rc) { DRM_ERROR("DRM DP AUX register failed\n"); goto end; } - rc = dp_register_audio_driver(dev, dp->audio); + rc = msm_dp_register_audio_driver(dev, dp->audio); if (rc) { DRM_ERROR("Audio registration Dp failed\n"); goto end; } - rc = dp_hpd_event_thread_start(dp); + rc = msm_dp_hpd_event_thread_start(dp); if (rc) { DRM_ERROR("Event thread create failed\n"); goto end; @@ -296,44 +305,44 @@ end: return rc; } -static void dp_display_unbind(struct device *dev, struct device *master, +static void msm_dp_display_unbind(struct device *dev, struct device *master, void *data) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); struct msm_drm_private *priv = dev_get_drvdata(master); kthread_stop(dp->ev_tsk); of_dp_aux_depopulate_bus(dp->aux); - dp_unregister_audio_driver(dev, dp->audio); - dp_aux_unregister(dp->aux); + msm_dp_unregister_audio_driver(dev, dp->audio); + msm_dp_aux_unregister(dp->aux); dp->drm_dev = NULL; dp->aux->drm_dev = NULL; priv->dp[dp->id] = NULL; } -static const struct component_ops dp_display_comp_ops = { - .bind = dp_display_bind, - .unbind = dp_display_unbind, +static const struct component_ops msm_dp_display_comp_ops = { + .bind = msm_dp_display_bind, + .unbind = msm_dp_display_unbind, }; -static void dp_display_send_hpd_event(struct msm_dp *dp_display) +static void msm_dp_display_send_hpd_event(struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; struct drm_connector *connector; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); - connector = dp->dp_display.connector; + connector = dp->msm_dp_display.connector; drm_helper_hpd_irq_event(connector->dev); } -static int dp_display_send_hpd_notification(struct dp_display_private *dp, +static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *dp, bool hpd) { - if ((hpd && dp->dp_display.link_ready) || - (!hpd && !dp->dp_display.link_ready)) { + if ((hpd && dp->msm_dp_display.link_ready) || + (!hpd && !dp->msm_dp_display.link_ready)) { drm_dbg_dp(dp->drm_dev, "HPD already %s\n", (hpd ? 
"on" : "off")); return 0; @@ -342,139 +351,139 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp, /* reset video pattern flag on disconnect */ if (!hpd) { dp->panel->video_test = false; - if (!dp->dp_display.is_edp) - drm_dp_set_subconnector_property(dp->dp_display.connector, + if (!dp->msm_dp_display.is_edp) + drm_dp_set_subconnector_property(dp->msm_dp_display.connector, connector_status_disconnected, dp->panel->dpcd, dp->panel->downstream_ports); } - dp->dp_display.link_ready = hpd; + dp->msm_dp_display.link_ready = hpd; drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n", - dp->dp_display.connector_type, hpd); - dp_display_send_hpd_event(&dp->dp_display); + dp->msm_dp_display.connector_type, hpd); + msm_dp_display_send_hpd_event(&dp->msm_dp_display); return 0; } -static int dp_display_process_hpd_high(struct dp_display_private *dp) +static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp) { - struct drm_connector *connector = dp->dp_display.connector; + struct drm_connector *connector = dp->msm_dp_display.connector; const struct drm_display_info *info = &connector->display_info; int rc = 0; - rc = dp_panel_read_sink_caps(dp->panel, connector); + rc = msm_dp_panel_read_sink_caps(dp->panel, connector); if (rc) goto end; - dp_link_process_request(dp->link); + msm_dp_link_process_request(dp->link); - if (!dp->dp_display.is_edp) + if (!dp->msm_dp_display.is_edp) drm_dp_set_subconnector_property(connector, connector_status_connected, dp->panel->dpcd, dp->panel->downstream_ports); - dp->dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled; + dp->msm_dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled; dp->audio_supported = info->has_audio; - dp_panel_handle_sink_request(dp->panel); + msm_dp_panel_handle_sink_request(dp->panel); /* * set sink to normal operation mode -- D0 * before dpcd read */ - dp_link_psm_config(dp->link, &dp->panel->link_info, false); + msm_dp_link_psm_config(dp->link, &dp->panel->link_info, false); - dp_link_reset_phy_params_vx_px(dp->link); - rc = dp_ctrl_on_link(dp->ctrl); + msm_dp_link_reset_phy_params_vx_px(dp->link); + rc = msm_dp_ctrl_on_link(dp->ctrl); if (rc) { DRM_ERROR("failed to complete DP link training\n"); goto end; } - dp_add_event(dp, EV_USER_NOTIFICATION, true, 0); + msm_dp_add_event(dp, EV_USER_NOTIFICATION, true, 0); end: return rc; } -static void dp_display_host_phy_init(struct dp_display_private *dp) +static void msm_dp_display_host_phy_init(struct msm_dp_display_private *dp) { drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", - dp->dp_display.connector_type, dp->core_initialized, + dp->msm_dp_display.connector_type, dp->core_initialized, dp->phy_initialized); if (!dp->phy_initialized) { - dp_ctrl_phy_init(dp->ctrl); + msm_dp_ctrl_phy_init(dp->ctrl); dp->phy_initialized = true; } } -static void dp_display_host_phy_exit(struct dp_display_private *dp) +static void msm_dp_display_host_phy_exit(struct msm_dp_display_private *dp) { drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", - dp->dp_display.connector_type, dp->core_initialized, + dp->msm_dp_display.connector_type, dp->core_initialized, dp->phy_initialized); if (dp->phy_initialized) { - dp_ctrl_phy_exit(dp->ctrl); + msm_dp_ctrl_phy_exit(dp->ctrl); dp->phy_initialized = false; } } -static void dp_display_host_init(struct dp_display_private *dp) +static void msm_dp_display_host_init(struct msm_dp_display_private *dp) { drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", - 
dp->dp_display.connector_type, dp->core_initialized, + dp->msm_dp_display.connector_type, dp->core_initialized, dp->phy_initialized); - dp_ctrl_core_clk_enable(dp->ctrl); - dp_ctrl_reset_irq_ctrl(dp->ctrl, true); - dp_aux_init(dp->aux); + msm_dp_ctrl_core_clk_enable(dp->ctrl); + msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, true); + msm_dp_aux_init(dp->aux); dp->core_initialized = true; } -static void dp_display_host_deinit(struct dp_display_private *dp) +static void msm_dp_display_host_deinit(struct msm_dp_display_private *dp) { drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", - dp->dp_display.connector_type, dp->core_initialized, + dp->msm_dp_display.connector_type, dp->core_initialized, dp->phy_initialized); - dp_ctrl_reset_irq_ctrl(dp->ctrl, false); - dp_aux_deinit(dp->aux); - dp_ctrl_core_clk_disable(dp->ctrl); + msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, false); + msm_dp_aux_deinit(dp->aux); + msm_dp_ctrl_core_clk_disable(dp->ctrl); dp->core_initialized = false; } -static int dp_display_usbpd_configure_cb(struct device *dev) +static int msm_dp_display_usbpd_configure_cb(struct device *dev) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); - dp_display_host_phy_init(dp); + msm_dp_display_host_phy_init(dp); - return dp_display_process_hpd_high(dp); + return msm_dp_display_process_hpd_high(dp); } -static int dp_display_notify_disconnect(struct device *dev) +static int msm_dp_display_notify_disconnect(struct device *dev) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); - dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); + msm_dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); return 0; } -static void dp_display_handle_video_request(struct dp_display_private *dp) +static void msm_dp_display_handle_video_request(struct msm_dp_display_private *dp) { if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) { dp->panel->video_test = true; - dp_link_send_test_response(dp->link); + msm_dp_link_send_test_response(dp->link); } } -static int dp_display_handle_port_status_changed(struct dp_display_private *dp) +static int msm_dp_display_handle_port_status_changed(struct msm_dp_display_private *dp) { int rc = 0; @@ -482,12 +491,12 @@ static int dp_display_handle_port_status_changed(struct dp_display_private *dp) drm_dbg_dp(dp->drm_dev, "sink count is zero, nothing to do\n"); if (dp->hpd_state != ST_DISCONNECTED) { dp->hpd_state = ST_DISCONNECT_PENDING; - dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); + msm_dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); } } else { if (dp->hpd_state == ST_DISCONNECTED) { dp->hpd_state = ST_MAINLINK_READY; - rc = dp_display_process_hpd_high(dp); + rc = msm_dp_display_process_hpd_high(dp); if (rc) dp->hpd_state = ST_DISCONNECTED; } @@ -496,7 +505,7 @@ static int dp_display_handle_port_status_changed(struct dp_display_private *dp) return rc; } -static int dp_display_handle_irq_hpd(struct dp_display_private *dp) +static int msm_dp_display_handle_irq_hpd(struct msm_dp_display_private *dp) { u32 sink_request = dp->link->sink_request; @@ -510,48 +519,48 @@ static int dp_display_handle_irq_hpd(struct dp_display_private *dp) } } - dp_ctrl_handle_sink_request(dp->ctrl); + msm_dp_ctrl_handle_sink_request(dp->ctrl); if (sink_request & DP_TEST_LINK_VIDEO_PATTERN) - dp_display_handle_video_request(dp); + msm_dp_display_handle_video_request(dp); return 0; } -static int 
dp_display_usbpd_attention_cb(struct device *dev) +static int msm_dp_display_usbpd_attention_cb(struct device *dev) { int rc = 0; u32 sink_request; - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); /* check for any test request issued by sink */ - rc = dp_link_process_request(dp->link); + rc = msm_dp_link_process_request(dp->link); if (!rc) { sink_request = dp->link->sink_request; drm_dbg_dp(dp->drm_dev, "hpd_state=%d sink_request=%d\n", dp->hpd_state, sink_request); if (sink_request & DS_PORT_STATUS_CHANGED) - rc = dp_display_handle_port_status_changed(dp); + rc = msm_dp_display_handle_port_status_changed(dp); else - rc = dp_display_handle_irq_hpd(dp); + rc = msm_dp_display_handle_irq_hpd(dp); } return rc; } -static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) +static int msm_dp_hpd_plug_handle(struct msm_dp_display_private *dp, u32 data) { u32 state; int ret; - struct platform_device *pdev = dp->dp_display.pdev; + struct platform_device *pdev = dp->msm_dp_display.pdev; - dp_aux_enable_xfers(dp->aux, true); + msm_dp_aux_enable_xfers(dp->aux, true); mutex_lock(&dp->event_mutex); state = dp->hpd_state; drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); if (state == ST_DISPLAY_OFF) { mutex_unlock(&dp->event_mutex); @@ -565,7 +574,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) if (state == ST_DISCONNECT_PENDING) { /* wait until ST_DISCONNECTED */ - dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */ + msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */ mutex_unlock(&dp->event_mutex); return 0; } @@ -577,7 +586,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) return ret; } - ret = dp_display_usbpd_configure_cb(&pdev->dev); + ret = msm_dp_display_usbpd_configure_cb(&pdev->dev); if (ret) { /* link train failed */ dp->hpd_state = ST_DISCONNECTED; pm_runtime_put_sync(&pdev->dev); @@ -586,60 +595,60 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) } drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); mutex_unlock(&dp->event_mutex); /* uevent will complete connection part */ return 0; }; -static void dp_display_handle_plugged_change(struct msm_dp *dp_display, +static void msm_dp_display_handle_plugged_change(struct msm_dp *msm_dp_display, bool plugged) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, - struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, + struct msm_dp_display_private, msm_dp_display); /* notify audio subsystem only if sink supports audio */ - if (dp_display->plugged_cb && dp_display->codec_dev && + if (msm_dp_display->plugged_cb && msm_dp_display->codec_dev && dp->audio_supported) - dp_display->plugged_cb(dp_display->codec_dev, plugged); + msm_dp_display->plugged_cb(msm_dp_display->codec_dev, plugged); } -static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) +static int msm_dp_hpd_unplug_handle(struct msm_dp_display_private *dp, u32 data) { u32 state; - struct platform_device *pdev = dp->dp_display.pdev; + struct platform_device *pdev = dp->msm_dp_display.pdev; - dp_aux_enable_xfers(dp->aux, false); + msm_dp_aux_enable_xfers(dp->aux, false); mutex_lock(&dp->event_mutex); state = dp->hpd_state; 
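/*
 * Editor's sketch of the hpd_state machine, pieced together from the
 * handlers in this patch (the state names are the driver's; the transition
 * summary is the editor's reading, not authoritative):
 *
 *   plug:          ... -> ST_MAINLINK_READY, then ST_CONNECTED once
 *                  atomic_enable completes
 *   unplug:        ST_MAINLINK_READY -> ST_DISCONNECTED (link never lit),
 *                  otherwise ST_DISCONNECT_PENDING until the uevent-driven
 *                  disable path finishes
 *   post_disable:  ST_DISCONNECT_PENDING -> ST_DISCONNECTED,
 *                  anything else -> ST_DISPLAY_OFF
 *
 * Every handler snapshots hpd_state under event_mutex, as done just above,
 * and branches on the snapshot rather than re-reading the field.
 */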
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); /* unplugged, no more irq_hpd handle */ - dp_del_event(dp, EV_IRQ_HPD_INT); + msm_dp_del_event(dp, EV_IRQ_HPD_INT); if (state == ST_DISCONNECTED) { /* triggered by irq_hdp with sink_count = 0 */ if (dp->link->sink_count == 0) { - dp_display_host_phy_exit(dp); + msm_dp_display_host_phy_exit(dp); } - dp_display_notify_disconnect(&dp->dp_display.pdev->dev); + msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev); mutex_unlock(&dp->event_mutex); return 0; } else if (state == ST_DISCONNECT_PENDING) { mutex_unlock(&dp->event_mutex); return 0; } else if (state == ST_MAINLINK_READY) { - dp_ctrl_off_link(dp->ctrl); - dp_display_host_phy_exit(dp); + msm_dp_ctrl_off_link(dp->ctrl); + msm_dp_display_host_phy_exit(dp); dp->hpd_state = ST_DISCONNECTED; - dp_display_notify_disconnect(&dp->dp_display.pdev->dev); + msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev); pm_runtime_put_sync(&pdev->dev); mutex_unlock(&dp->event_mutex); return 0; @@ -649,7 +658,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) * We don't need separate work for disconnect as * connect/attention interrupts are disabled */ - dp_display_notify_disconnect(&dp->dp_display.pdev->dev); + msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev); if (state == ST_DISPLAY_OFF) { dp->hpd_state = ST_DISCONNECTED; @@ -658,10 +667,10 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) } /* signal the disconnect event early to ensure proper teardown */ - dp_display_handle_plugged_change(&dp->dp_display, false); + msm_dp_display_handle_plugged_change(&dp->msm_dp_display, false); drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); /* uevent will complete disconnection part */ pm_runtime_put_sync(&pdev->dev); @@ -669,7 +678,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) return 0; } -static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) +static int msm_dp_irq_hpd_handle(struct msm_dp_display_private *dp, u32 data) { u32 state; @@ -678,7 +687,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) /* irq_hpd can happen at either connected or disconnected state */ state = dp->hpd_state; drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); if (state == ST_DISPLAY_OFF) { mutex_unlock(&dp->event_mutex); @@ -687,33 +696,33 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) if (state == ST_MAINLINK_READY || state == ST_DISCONNECT_PENDING) { /* wait until ST_CONNECTED */ - dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */ + msm_dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */ mutex_unlock(&dp->event_mutex); return 0; } - dp_display_usbpd_attention_cb(&dp->dp_display.pdev->dev); + msm_dp_display_usbpd_attention_cb(&dp->msm_dp_display.pdev->dev); drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); mutex_unlock(&dp->event_mutex); return 0; } -static void dp_display_deinit_sub_modules(struct dp_display_private *dp) +static void msm_dp_display_deinit_sub_modules(struct msm_dp_display_private *dp) { - dp_audio_put(dp->audio); - dp_panel_put(dp->panel); - dp_aux_put(dp->aux); 
+ msm_dp_audio_put(dp->audio); + msm_dp_panel_put(dp->panel); + msm_dp_aux_put(dp->aux); } -static int dp_init_sub_modules(struct dp_display_private *dp) +static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp) { int rc = 0; - struct device *dev = &dp->dp_display.pdev->dev; - struct dp_panel_in panel_in = { + struct device *dev = &dp->msm_dp_display.pdev->dev; + struct msm_dp_panel_in panel_in = { .dev = dev, }; struct phy *phy; @@ -723,14 +732,14 @@ static int dp_init_sub_modules(struct dp_display_private *dp) return PTR_ERR(phy); rc = phy_set_mode_ext(phy, PHY_MODE_DP, - dp->dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP); + dp->msm_dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP); if (rc) { DRM_ERROR("failed to set phy submode, rc = %d\n", rc); dp->catalog = NULL; goto error; } - dp->catalog = dp_catalog_get(dev); + dp->catalog = msm_dp_catalog_get(dev); if (IS_ERR(dp->catalog)) { rc = PTR_ERR(dp->catalog); DRM_ERROR("failed to initialize catalog, rc = %d\n", rc); @@ -738,9 +747,9 @@ static int dp_init_sub_modules(struct dp_display_private *dp) goto error; } - dp->aux = dp_aux_get(dev, dp->catalog, + dp->aux = msm_dp_aux_get(dev, dp->catalog, phy, - dp->dp_display.is_edp); + dp->msm_dp_display.is_edp); if (IS_ERR(dp->aux)) { rc = PTR_ERR(dp->aux); DRM_ERROR("failed to initialize aux, rc = %d\n", rc); @@ -748,7 +757,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp) goto error; } - dp->link = dp_link_get(dev, dp->aux); + dp->link = msm_dp_link_get(dev, dp->aux); if (IS_ERR(dp->link)) { rc = PTR_ERR(dp->link); DRM_ERROR("failed to initialize link, rc = %d\n", rc); @@ -760,7 +769,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp) panel_in.catalog = dp->catalog; panel_in.link = dp->link; - dp->panel = dp_panel_get(&panel_in); + dp->panel = msm_dp_panel_get(&panel_in); if (IS_ERR(dp->panel)) { rc = PTR_ERR(dp->panel); DRM_ERROR("failed to initialize panel, rc = %d\n", rc); @@ -768,7 +777,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp) goto error_link; } - dp->ctrl = dp_ctrl_get(dev, dp->link, dp->panel, dp->aux, + dp->ctrl = msm_dp_ctrl_get(dev, dp->link, dp->panel, dp->aux, dp->catalog, phy); if (IS_ERR(dp->ctrl)) { @@ -778,7 +787,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp) goto error_ctrl; } - dp->audio = dp_audio_get(dp->dp_display.pdev, dp->panel, dp->catalog); + dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->panel, dp->catalog); if (IS_ERR(dp->audio)) { rc = PTR_ERR(dp->audio); pr_err("failed to initialize audio, rc = %d\n", rc); @@ -789,51 +798,51 @@ static int dp_init_sub_modules(struct dp_display_private *dp) return rc; error_ctrl: - dp_panel_put(dp->panel); + msm_dp_panel_put(dp->panel); error_link: - dp_aux_put(dp->aux); + msm_dp_aux_put(dp->aux); error: return rc; } -static int dp_display_set_mode(struct msm_dp *dp_display, - struct dp_display_mode *mode) +static int msm_dp_display_set_mode(struct msm_dp *msm_dp_display, + struct msm_dp_display_mode *mode) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); - drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode); - dp->panel->dp_mode.bpp = mode->bpp; - dp->panel->dp_mode.out_fmt_is_yuv_420 = mode->out_fmt_is_yuv_420; - dp_panel_init_panel_info(dp->panel); + drm_mode_copy(&dp->panel->msm_dp_mode.drm_mode, &mode->drm_mode); + 
dp->panel->msm_dp_mode.bpp = mode->bpp; + dp->panel->msm_dp_mode.out_fmt_is_yuv_420 = mode->out_fmt_is_yuv_420; + msm_dp_panel_init_panel_info(dp->panel); return 0; } -static int dp_display_enable(struct dp_display_private *dp, bool force_link_train) +static int msm_dp_display_enable(struct msm_dp_display_private *dp, bool force_link_train) { int rc = 0; - struct msm_dp *dp_display = &dp->dp_display; + struct msm_dp *msm_dp_display = &dp->msm_dp_display; drm_dbg_dp(dp->drm_dev, "sink_count=%d\n", dp->link->sink_count); - if (dp_display->power_on) { + if (msm_dp_display->power_on) { drm_dbg_dp(dp->drm_dev, "Link already setup, return\n"); return 0; } - rc = dp_ctrl_on_stream(dp->ctrl, force_link_train); + rc = msm_dp_ctrl_on_stream(dp->ctrl, force_link_train); if (!rc) - dp_display->power_on = true; + msm_dp_display->power_on = true; return rc; } -static int dp_display_post_enable(struct msm_dp *dp_display) +static int msm_dp_display_post_enable(struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; u32 rate; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); rate = dp->link->link_params.rate; @@ -843,85 +852,85 @@ static int dp_display_post_enable(struct msm_dp *dp_display) } /* signal the connect event late to synchronize video and display */ - dp_display_handle_plugged_change(dp_display, true); + msm_dp_display_handle_plugged_change(msm_dp_display, true); - if (dp_display->psr_supported) - dp_ctrl_config_psr(dp->ctrl); + if (msm_dp_display->psr_supported) + msm_dp_ctrl_config_psr(dp->ctrl); return 0; } -static int dp_display_disable(struct dp_display_private *dp) +static int msm_dp_display_disable(struct msm_dp_display_private *dp) { - struct msm_dp *dp_display = &dp->dp_display; + struct msm_dp *msm_dp_display = &dp->msm_dp_display; - if (!dp_display->power_on) + if (!msm_dp_display->power_on) return 0; /* wait only if audio was enabled */ - if (dp_display->audio_enabled) { + if (msm_dp_display->audio_enabled) { /* signal the disconnect event */ - dp_display_handle_plugged_change(dp_display, false); + msm_dp_display_handle_plugged_change(msm_dp_display, false); if (!wait_for_completion_timeout(&dp->audio_comp, HZ * 5)) DRM_ERROR("audio comp timeout\n"); } - dp_display->audio_enabled = false; + msm_dp_display->audio_enabled = false; if (dp->link->sink_count == 0) { /* * irq_hpd with sink_count = 0 * hdmi unplugged out of dongle */ - dp_ctrl_off_link_stream(dp->ctrl); + msm_dp_ctrl_off_link_stream(dp->ctrl); } else { /* * unplugged interrupt * dongle unplugged out of DUT */ - dp_ctrl_off(dp->ctrl); - dp_display_host_phy_exit(dp); + msm_dp_ctrl_off(dp->ctrl); + msm_dp_display_host_phy_exit(dp); } - dp_display->power_on = false; + msm_dp_display->power_on = false; drm_dbg_dp(dp->drm_dev, "sink count: %d\n", dp->link->sink_count); return 0; } -int dp_display_set_plugged_cb(struct msm_dp *dp_display, +int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display, hdmi_codec_plugged_cb fn, struct device *codec_dev) { bool plugged; - dp_display->plugged_cb = fn; - dp_display->codec_dev = codec_dev; - plugged = dp_display->link_ready; - dp_display_handle_plugged_change(dp_display, plugged); + msm_dp_display->plugged_cb = fn; + msm_dp_display->codec_dev = codec_dev; + plugged = msm_dp_display->link_ready; + msm_dp_display_handle_plugged_change(msm_dp_display, plugged); return 0; } /** - * dp_bridge_mode_valid - callback to determine if 
specified mode is valid + * msm_dp_bridge_mode_valid - callback to determine if specified mode is valid * @bridge: Pointer to drm bridge structure * @info: display info * @mode: Pointer to drm mode structure * Returns: Validity status for specified mode */ -enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, +enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { const u32 num_components = 3, default_bpp = 24; - struct dp_display_private *dp_display; - struct dp_link_info *link_info; + struct msm_dp_display_private *msm_dp_display; + struct msm_dp_link_info *link_info; u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0; struct msm_dp *dp; int mode_pclk_khz = mode->clock; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; if (!dp || !mode_pclk_khz || !dp->connector) { DRM_ERROR("invalid params\n"); @@ -931,18 +940,18 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, if (mode->clock > DP_MAX_PIXEL_CLK_KHZ) return MODE_CLOCK_HIGH; - dp_display = container_of(dp, struct dp_display_private, dp_display); - link_info = &dp_display->panel->link_info; + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); + link_info = &msm_dp_display->panel->link_info; if (drm_mode_is_420_only(&dp->connector->display_info, mode) && - dp_display->panel->vsc_sdp_supported) + msm_dp_display->panel->vsc_sdp_supported) mode_pclk_khz /= 2; mode_bpp = dp->connector->display_info.bpc * num_components; if (!mode_bpp) mode_bpp = default_bpp; - mode_bpp = dp_panel_get_mode_bpp(dp_display->panel, + mode_bpp = msm_dp_panel_get_mode_bpp(msm_dp_display->panel, mode_bpp, mode_pclk_khz); mode_rate_khz = mode_pclk_khz * mode_bpp; @@ -954,50 +963,50 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, return MODE_OK; } -int dp_display_get_modes(struct msm_dp *dp) +int msm_dp_display_get_modes(struct msm_dp *dp) { - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; if (!dp) { DRM_ERROR("invalid params\n"); return 0; } - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); - return dp_panel_get_modes(dp_display->panel, + return msm_dp_panel_get_modes(msm_dp_display->panel, dp->connector); } -bool dp_display_check_video_test(struct msm_dp *dp) +bool msm_dp_display_check_video_test(struct msm_dp *dp) { - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); - return dp_display->panel->video_test; + return msm_dp_display->panel->video_test; } -int dp_display_get_test_bpp(struct msm_dp *dp) +int msm_dp_display_get_test_bpp(struct msm_dp *dp) { - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; if (!dp) { DRM_ERROR("invalid params\n"); return 0; } - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); - return dp_link_bit_depth_to_bpp( - dp_display->link->test_video.test_bit_depth); + return msm_dp_link_bit_depth_to_bpp( + msm_dp_display->link->test_video.test_bit_depth); } void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp) { - struct 
dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); /* * if we are reading registers we need the link clocks to be on @@ -1006,65 +1015,65 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp) * power_on status before dumping DP registers to avoid crash due * to unclocked access */ - mutex_lock(&dp_display->event_mutex); + mutex_lock(&msm_dp_display->event_mutex); if (!dp->power_on) { - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); return; } - dp_catalog_snapshot(dp_display->catalog, disp_state); + msm_dp_catalog_snapshot(msm_dp_display->catalog, disp_state); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); } -void dp_display_set_psr(struct msm_dp *dp_display, bool enter) +void msm_dp_display_set_psr(struct msm_dp *msm_dp_display, bool enter) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - if (!dp_display) { + if (!msm_dp_display) { DRM_ERROR("invalid params\n"); return; } - dp = container_of(dp_display, struct dp_display_private, dp_display); - dp_ctrl_set_psr(dp->ctrl, enter); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); + msm_dp_ctrl_set_psr(dp->ctrl, enter); } static int hpd_event_thread(void *data) { - struct dp_display_private *dp_priv; + struct msm_dp_display_private *msm_dp_priv; unsigned long flag; - struct dp_event *todo; + struct msm_dp_event *todo; int timeout_mode = 0; - dp_priv = (struct dp_display_private *)data; + msm_dp_priv = (struct msm_dp_display_private *)data; while (1) { if (timeout_mode) { - wait_event_timeout(dp_priv->event_q, - (dp_priv->event_pndx == dp_priv->event_gndx) || + wait_event_timeout(msm_dp_priv->event_q, + (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) || kthread_should_stop(), EVENT_TIMEOUT); } else { - wait_event_interruptible(dp_priv->event_q, - (dp_priv->event_pndx != dp_priv->event_gndx) || + wait_event_interruptible(msm_dp_priv->event_q, + (msm_dp_priv->event_pndx != msm_dp_priv->event_gndx) || kthread_should_stop()); } if (kthread_should_stop()) break; - spin_lock_irqsave(&dp_priv->event_lock, flag); - todo = &dp_priv->event_list[dp_priv->event_gndx]; + spin_lock_irqsave(&msm_dp_priv->event_lock, flag); + todo = &msm_dp_priv->event_list[msm_dp_priv->event_gndx]; if (todo->delay) { - struct dp_event *todo_next; + struct msm_dp_event *todo_next; - dp_priv->event_gndx++; - dp_priv->event_gndx %= DP_EVENT_Q_MAX; + msm_dp_priv->event_gndx++; + msm_dp_priv->event_gndx %= DP_EVENT_Q_MAX; /* re enter delay event into q */ - todo_next = &dp_priv->event_list[dp_priv->event_pndx++]; - dp_priv->event_pndx %= DP_EVENT_Q_MAX; + todo_next = &msm_dp_priv->event_list[msm_dp_priv->event_pndx++]; + msm_dp_priv->event_pndx %= DP_EVENT_Q_MAX; todo_next->event_id = todo->event_id; todo_next->data = todo->data; todo_next->delay = todo->delay - 1; @@ -1075,33 +1084,33 @@ static int hpd_event_thread(void *data) /* switch to timeout mode */ timeout_mode = 1; - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); continue; } /* timeout with no events in q */ - if (dp_priv->event_pndx == dp_priv->event_gndx) { - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + if (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) { + 
spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); continue; } - dp_priv->event_gndx++; - dp_priv->event_gndx %= DP_EVENT_Q_MAX; + msm_dp_priv->event_gndx++; + msm_dp_priv->event_gndx %= DP_EVENT_Q_MAX; timeout_mode = 0; - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); switch (todo->event_id) { case EV_HPD_PLUG_INT: - dp_hpd_plug_handle(dp_priv, todo->data); + msm_dp_hpd_plug_handle(msm_dp_priv, todo->data); break; case EV_HPD_UNPLUG_INT: - dp_hpd_unplug_handle(dp_priv, todo->data); + msm_dp_hpd_unplug_handle(msm_dp_priv, todo->data); break; case EV_IRQ_HPD_INT: - dp_irq_hpd_handle(dp_priv, todo->data); + msm_dp_irq_hpd_handle(msm_dp_priv, todo->data); break; case EV_USER_NOTIFICATION: - dp_display_send_hpd_notification(dp_priv, + msm_dp_display_send_hpd_notification(msm_dp_priv, todo->data); break; default: @@ -1112,22 +1121,22 @@ static int hpd_event_thread(void *data) return 0; } -static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv) +static int msm_dp_hpd_event_thread_start(struct msm_dp_display_private *msm_dp_priv) { /* set event q to empty */ - dp_priv->event_gndx = 0; - dp_priv->event_pndx = 0; + msm_dp_priv->event_gndx = 0; + msm_dp_priv->event_pndx = 0; - dp_priv->ev_tsk = kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler"); - if (IS_ERR(dp_priv->ev_tsk)) - return PTR_ERR(dp_priv->ev_tsk); + msm_dp_priv->ev_tsk = kthread_run(hpd_event_thread, msm_dp_priv, "dp_hpd_handler"); + if (IS_ERR(msm_dp_priv->ev_tsk)) + return PTR_ERR(msm_dp_priv->ev_tsk); return 0; } -static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) +static irqreturn_t msm_dp_display_irq_handler(int irq, void *dev_id) { - struct dp_display_private *dp = dev_id; + struct msm_dp_display_private *dp = dev_id; irqreturn_t ret = IRQ_NONE; u32 hpd_isr_status; @@ -1136,43 +1145,43 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) return IRQ_NONE; } - hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog); + hpd_isr_status = msm_dp_catalog_hpd_get_intr_status(dp->catalog); if (hpd_isr_status & 0x0F) { drm_dbg_dp(dp->drm_dev, "type=%d isr=0x%x\n", - dp->dp_display.connector_type, hpd_isr_status); + dp->msm_dp_display.connector_type, hpd_isr_status); /* hpd related interrupts */ if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK) - dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); + msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) { - dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0); + msm_dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0); } if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) { - dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); - dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3); + msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); + msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3); } if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK) - dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); + msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); ret = IRQ_HANDLED; } /* DP controller isr */ - ret |= dp_ctrl_isr(dp->ctrl); + ret |= msm_dp_ctrl_isr(dp->ctrl); /* DP aux isr */ - ret |= dp_aux_isr(dp->aux); + ret |= msm_dp_aux_isr(dp->aux); return ret; } -static int dp_display_request_irq(struct dp_display_private *dp) +static int msm_dp_display_request_irq(struct msm_dp_display_private *dp) { int rc = 0; - struct platform_device *pdev = dp->dp_display.pdev; + struct platform_device *pdev = dp->msm_dp_display.pdev; dp->irq = platform_get_irq(pdev, 0); if (dp->irq < 0) { @@ -1180,7 +1189,7 @@ static int dp_display_request_irq(struct 
dp_display_private *dp) return dp->irq; } - rc = devm_request_irq(&pdev->dev, dp->irq, dp_display_irq_handler, + rc = devm_request_irq(&pdev->dev, dp->irq, msm_dp_display_irq_handler, IRQF_TRIGGER_HIGH|IRQF_NO_AUTOEN, "dp_display_isr", dp); @@ -1193,7 +1202,7 @@ static int dp_display_request_irq(struct dp_display_private *dp) return 0; } -static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev) +static const struct msm_dp_desc *msm_dp_display_get_desc(struct platform_device *pdev) { const struct msm_dp_desc *descs = of_device_get_match_data(&pdev->dev); struct resource *res; @@ -1212,7 +1221,7 @@ static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pde return NULL; } -static int dp_display_probe_tail(struct device *dev) +static int msm_dp_display_probe_tail(struct device *dev) { struct msm_dp *dp = dev_get_drvdata(dev); int ret; @@ -1232,19 +1241,19 @@ static int dp_display_probe_tail(struct device *dev) return ret; } - ret = component_add(dev, &dp_display_comp_ops); + ret = component_add(dev, &msm_dp_display_comp_ops); if (ret) DRM_ERROR("component add failed, rc=%d\n", ret); return ret; } -static int dp_auxbus_done_probe(struct drm_dp_aux *aux) +static int msm_dp_auxbus_done_probe(struct drm_dp_aux *aux) { - return dp_display_probe_tail(aux->dev); + return msm_dp_display_probe_tail(aux->dev); } -static int dp_display_get_connector_type(struct platform_device *pdev, +static int msm_dp_display_get_connector_type(struct platform_device *pdev, const struct msm_dp_desc *desc) { struct device_node *node = pdev->dev.of_node; @@ -1263,10 +1272,10 @@ static int dp_display_get_connector_type(struct platform_device *pdev, return connector_type; } -static int dp_display_probe(struct platform_device *pdev) +static int msm_dp_display_probe(struct platform_device *pdev) { int rc = 0; - struct dp_display_private *dp; + struct msm_dp_display_private *dp; const struct msm_dp_desc *desc; if (!pdev || !pdev->dev.of_node) { @@ -1278,18 +1287,18 @@ static int dp_display_probe(struct platform_device *pdev) if (!dp) return -ENOMEM; - desc = dp_display_get_desc(pdev); + desc = msm_dp_display_get_desc(pdev); if (!desc) return -EINVAL; - dp->dp_display.pdev = pdev; + dp->msm_dp_display.pdev = pdev; dp->id = desc->id; - dp->dp_display.connector_type = dp_display_get_connector_type(pdev, desc); + dp->msm_dp_display.connector_type = msm_dp_display_get_connector_type(pdev, desc); dp->wide_bus_supported = desc->wide_bus_supported; - dp->dp_display.is_edp = - (dp->dp_display.connector_type == DRM_MODE_CONNECTOR_eDP); + dp->msm_dp_display.is_edp = + (dp->msm_dp_display.connector_type == DRM_MODE_CONNECTOR_eDP); - rc = dp_init_sub_modules(dp); + rc = msm_dp_init_sub_modules(dp); if (rc) { DRM_ERROR("init sub module failed\n"); return -EPROBE_DEFER; @@ -1301,28 +1310,28 @@ static int dp_display_probe(struct platform_device *pdev) spin_lock_init(&dp->event_lock); /* Store DP audio handle inside DP display */ - dp->dp_display.dp_audio = dp->audio; + dp->msm_dp_display.msm_dp_audio = dp->audio; init_completion(&dp->audio_comp); - platform_set_drvdata(pdev, &dp->dp_display); + platform_set_drvdata(pdev, &dp->msm_dp_display); rc = devm_pm_runtime_enable(&pdev->dev); if (rc) goto err; - rc = dp_display_request_irq(dp); + rc = msm_dp_display_request_irq(dp); if (rc) goto err; - if (dp->dp_display.is_edp) { - rc = devm_of_dp_aux_populate_bus(dp->aux, dp_auxbus_done_probe); + if (dp->msm_dp_display.is_edp) { + rc = devm_of_dp_aux_populate_bus(dp->aux, msm_dp_auxbus_done_probe); if 
(rc) { DRM_ERROR("eDP auxbus population failed, rc=%d\n", rc); goto err; } } else { - rc = dp_display_probe_tail(&pdev->dev); + rc = msm_dp_display_probe_tail(&pdev->dev); if (rc) goto err; } @@ -1330,70 +1339,70 @@ static int dp_display_probe(struct platform_device *pdev) return rc; err: - dp_display_deinit_sub_modules(dp); + msm_dp_display_deinit_sub_modules(dp); return rc; } -static void dp_display_remove(struct platform_device *pdev) +static void msm_dp_display_remove(struct platform_device *pdev) { - struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(&pdev->dev); - component_del(&pdev->dev, &dp_display_comp_ops); - dp_display_deinit_sub_modules(dp); + component_del(&pdev->dev, &msm_dp_display_comp_ops); + msm_dp_display_deinit_sub_modules(dp); platform_set_drvdata(pdev, NULL); } -static int dp_pm_runtime_suspend(struct device *dev) +static int msm_dp_pm_runtime_suspend(struct device *dev) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); disable_irq(dp->irq); - if (dp->dp_display.is_edp) { - dp_display_host_phy_exit(dp); - dp_catalog_ctrl_hpd_disable(dp->catalog); + if (dp->msm_dp_display.is_edp) { + msm_dp_display_host_phy_exit(dp); + msm_dp_catalog_ctrl_hpd_disable(dp->catalog); } - dp_display_host_deinit(dp); + msm_dp_display_host_deinit(dp); return 0; } -static int dp_pm_runtime_resume(struct device *dev) +static int msm_dp_pm_runtime_resume(struct device *dev) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); /* * for eDP, host cotroller, HPD block and PHY are enabled here * but with HPD irq disabled * * for DP, only host controller is enabled here. 
- * HPD block is enabled at dp_bridge_hpd_enable() + * HPD block is enabled at msm_dp_bridge_hpd_enable() * PHY will be enabled at plugin handler later */ - dp_display_host_init(dp); - if (dp->dp_display.is_edp) { - dp_catalog_ctrl_hpd_enable(dp->catalog); - dp_display_host_phy_init(dp); + msm_dp_display_host_init(dp); + if (dp->msm_dp_display.is_edp) { + msm_dp_catalog_ctrl_hpd_enable(dp->catalog); + msm_dp_display_host_phy_init(dp); } enable_irq(dp->irq); return 0; } -static const struct dev_pm_ops dp_pm_ops = { - SET_RUNTIME_PM_OPS(dp_pm_runtime_suspend, dp_pm_runtime_resume, NULL) +static const struct dev_pm_ops msm_dp_pm_ops = { + SET_RUNTIME_PM_OPS(msm_dp_pm_runtime_suspend, msm_dp_pm_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; -static struct platform_driver dp_display_driver = { - .probe = dp_display_probe, - .remove_new = dp_display_remove, +static struct platform_driver msm_dp_display_driver = { + .probe = msm_dp_display_probe, + .remove = msm_dp_display_remove, .driver = { .name = "msm-dp-display", - .of_match_table = dp_dt_match, + .of_match_table = msm_dp_dt_match, .suppress_bind_attrs = true, - .pm = &dp_pm_ops, + .pm = &msm_dp_pm_ops, }, }; @@ -1401,7 +1410,7 @@ int __init msm_dp_register(void) { int ret; - ret = platform_driver_register(&dp_display_driver); + ret = platform_driver_register(&msm_dp_display_driver); if (ret) DRM_ERROR("Dp display driver register failed"); @@ -1410,294 +1419,294 @@ int __init msm_dp_register(void) void __exit msm_dp_unregister(void) { - platform_driver_unregister(&dp_display_driver); + platform_driver_unregister(&msm_dp_display_driver); } -bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display, +bool msm_dp_is_yuv_420_enabled(const struct msm_dp *msm_dp_display, const struct drm_display_mode *mode) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; const struct drm_display_info *info; - dp = container_of(dp_display, struct dp_display_private, dp_display); - info = &dp_display->connector->display_info; + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); + info = &msm_dp_display->connector->display_info; return dp->panel->vsc_sdp_supported && drm_mode_is_420_only(info, mode); } -bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display, +bool msm_dp_needs_periph_flush(const struct msm_dp *msm_dp_display, const struct drm_display_mode *mode) { - return msm_dp_is_yuv_420_enabled(dp_display, mode); + return msm_dp_is_yuv_420_enabled(msm_dp_display, mode); } -bool msm_dp_wide_bus_available(const struct msm_dp *dp_display) +bool msm_dp_wide_bus_available(const struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); - if (dp->dp_mode.out_fmt_is_yuv_420) + if (dp->msm_dp_mode.out_fmt_is_yuv_420) return false; return dp->wide_bus_supported; } -void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *root, bool is_edp) +void msm_dp_display_debugfs_init(struct msm_dp *msm_dp_display, struct dentry *root, bool is_edp) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; struct device *dev; int rc; - dp = container_of(dp_display, struct dp_display_private, dp_display); - dev = &dp->dp_display.pdev->dev; + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); + dev = 
&dp->msm_dp_display.pdev->dev; - rc = dp_debug_init(dev, dp->panel, dp->link, dp->dp_display.connector, root, is_edp); + rc = msm_dp_debug_init(dev, dp->panel, dp->link, dp->msm_dp_display.connector, root, is_edp); if (rc) DRM_ERROR("failed to initialize debug, rc = %d\n", rc); } -int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, +int msm_dp_modeset_init(struct msm_dp *msm_dp_display, struct drm_device *dev, struct drm_encoder *encoder, bool yuv_supported) { - struct dp_display_private *dp_priv; + struct msm_dp_display_private *msm_dp_priv; int ret; - dp_display->drm_dev = dev; + msm_dp_display->drm_dev = dev; - dp_priv = container_of(dp_display, struct dp_display_private, dp_display); + msm_dp_priv = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); - ret = dp_bridge_init(dp_display, dev, encoder); + ret = msm_dp_bridge_init(msm_dp_display, dev, encoder, yuv_supported); if (ret) { DRM_DEV_ERROR(dev->dev, "failed to create dp bridge: %d\n", ret); return ret; } - dp_display->connector = dp_drm_connector_init(dp_display, encoder, yuv_supported); - if (IS_ERR(dp_display->connector)) { - ret = PTR_ERR(dp_display->connector); + msm_dp_display->connector = msm_dp_drm_connector_init(msm_dp_display, encoder); + if (IS_ERR(msm_dp_display->connector)) { + ret = PTR_ERR(msm_dp_display->connector); DRM_DEV_ERROR(dev->dev, "failed to create dp connector: %d\n", ret); - dp_display->connector = NULL; + msm_dp_display->connector = NULL; return ret; } - dp_priv->panel->connector = dp_display->connector; + msm_dp_priv->panel->connector = msm_dp_display->connector; return 0; } -void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; int rc = 0; - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; u32 state; bool force_link_train = false; - dp_display = container_of(dp, struct dp_display_private, dp_display); - if (!dp_display->dp_mode.drm_mode.clock) { + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); + if (!msm_dp_display->msm_dp_mode.drm_mode.clock) { DRM_ERROR("invalid params\n"); return; } if (dp->is_edp) - dp_hpd_plug_handle(dp_display, 0); + msm_dp_hpd_plug_handle(msm_dp_display, 0); - mutex_lock(&dp_display->event_mutex); + mutex_lock(&msm_dp_display->event_mutex); if (pm_runtime_resume_and_get(&dp->pdev->dev)) { DRM_ERROR("failed to pm_runtime_resume\n"); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); return; } - state = dp_display->hpd_state; + state = msm_dp_display->hpd_state; if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) { - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); return; } - rc = dp_display_set_mode(dp, &dp_display->dp_mode); + rc = msm_dp_display_set_mode(dp, &msm_dp_display->msm_dp_mode); if (rc) { DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); return; } - state = dp_display->hpd_state; + state = msm_dp_display->hpd_state; if (state == ST_DISPLAY_OFF) { - dp_display_host_phy_init(dp_display); + msm_dp_display_host_phy_init(msm_dp_display); 
force_link_train = true; } - dp_display_enable(dp_display, force_link_train); + msm_dp_display_enable(msm_dp_display, force_link_train); - rc = dp_display_post_enable(dp); + rc = msm_dp_display_post_enable(dp); if (rc) { DRM_ERROR("DP display post enable failed, rc=%d\n", rc); - dp_display_disable(dp_display); + msm_dp_display_disable(msm_dp_display); } /* completed connection */ - dp_display->hpd_state = ST_CONNECTED; + msm_dp_display->hpd_state = ST_CONNECTED; drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); } -void dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; - struct dp_display_private *dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private *msm_dp_display; - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); - dp_ctrl_push_idle(dp_display->ctrl); + msm_dp_ctrl_push_idle(msm_dp_display->ctrl); } -void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; u32 state; - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); if (dp->is_edp) - dp_hpd_unplug_handle(dp_display, 0); + msm_dp_hpd_unplug_handle(msm_dp_display, 0); - mutex_lock(&dp_display->event_mutex); + mutex_lock(&msm_dp_display->event_mutex); - state = dp_display->hpd_state; + state = msm_dp_display->hpd_state; if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n", dp->connector_type, state); - dp_display_disable(dp_display); + msm_dp_display_disable(msm_dp_display); - state = dp_display->hpd_state; + state = msm_dp_display->hpd_state; if (state == ST_DISCONNECT_PENDING) { /* completed disconnection */ - dp_display->hpd_state = ST_DISCONNECTED; + msm_dp_display->hpd_state = ST_DISCONNECTED; } else { - dp_display->hpd_state = ST_DISPLAY_OFF; + msm_dp_display->hpd_state = ST_DISPLAY_OFF; } drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type); pm_runtime_put_sync(&dp->pdev->dev); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); } -void dp_bridge_mode_set(struct drm_bridge *drm_bridge, +void msm_dp_bridge_mode_set(struct drm_bridge *drm_bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; - struct dp_display_private *dp_display; - struct dp_panel *dp_panel; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private 
*msm_dp_display; + struct msm_dp_panel *msm_dp_panel; - dp_display = container_of(dp, struct dp_display_private, dp_display); - dp_panel = dp_display->panel; + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); + msm_dp_panel = msm_dp_display->panel; - memset(&dp_display->dp_mode, 0x0, sizeof(struct dp_display_mode)); + memset(&msm_dp_display->msm_dp_mode, 0x0, sizeof(struct msm_dp_display_mode)); - if (dp_display_check_video_test(dp)) - dp_display->dp_mode.bpp = dp_display_get_test_bpp(dp); + if (msm_dp_display_check_video_test(dp)) + msm_dp_display->msm_dp_mode.bpp = msm_dp_display_get_test_bpp(dp); else /* Default num_components per pixel = 3 */ - dp_display->dp_mode.bpp = dp->connector->display_info.bpc * 3; + msm_dp_display->msm_dp_mode.bpp = dp->connector->display_info.bpc * 3; - if (!dp_display->dp_mode.bpp) - dp_display->dp_mode.bpp = 24; /* Default bpp */ + if (!msm_dp_display->msm_dp_mode.bpp) + msm_dp_display->msm_dp_mode.bpp = 24; /* Default bpp */ - drm_mode_copy(&dp_display->dp_mode.drm_mode, adjusted_mode); + drm_mode_copy(&msm_dp_display->msm_dp_mode.drm_mode, adjusted_mode); - dp_display->dp_mode.v_active_low = - !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC); + msm_dp_display->msm_dp_mode.v_active_low = + !!(msm_dp_display->msm_dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC); - dp_display->dp_mode.h_active_low = - !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC); + msm_dp_display->msm_dp_mode.h_active_low = + !!(msm_dp_display->msm_dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC); - dp_display->dp_mode.out_fmt_is_yuv_420 = + msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 = drm_mode_is_420_only(&dp->connector->display_info, adjusted_mode) && - dp_panel->vsc_sdp_supported; + msm_dp_panel->vsc_sdp_supported; /* populate wide_bus_support to different layers */ - dp_display->ctrl->wide_bus_en = - dp_display->dp_mode.out_fmt_is_yuv_420 ? false : dp_display->wide_bus_supported; - dp_display->catalog->wide_bus_en = - dp_display->dp_mode.out_fmt_is_yuv_420 ? false : dp_display->wide_bus_supported; + msm_dp_display->ctrl->wide_bus_en = + msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? false : msm_dp_display->wide_bus_supported; + msm_dp_display->catalog->wide_bus_en = + msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? 
false : msm_dp_display->wide_bus_supported; } -void dp_bridge_hpd_enable(struct drm_bridge *bridge) +void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge); - struct msm_dp *dp_display = dp_bridge->dp_display; - struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display); + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge); + struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); /* * this is for external DP with hpd irq enabled case, - * step-1: dp_pm_runtime_resume() enable dp host only + * step-1: msm_dp_pm_runtime_resume() enable dp host only * step-2: enable hpd block and have hpd irq enabled here * step-3: waiting for plugin irq while phy is not initialized * step-4: DP PHY is initialized at plugin handler before link training * */ mutex_lock(&dp->event_mutex); - if (pm_runtime_resume_and_get(&dp_display->pdev->dev)) { + if (pm_runtime_resume_and_get(&msm_dp_display->pdev->dev)) { DRM_ERROR("failed to resume power\n"); mutex_unlock(&dp->event_mutex); return; } - dp_catalog_ctrl_hpd_enable(dp->catalog); + msm_dp_catalog_ctrl_hpd_enable(dp->catalog); /* enable HPD interrupts */ - dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true); + msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true); - dp_display->internal_hpd = true; + msm_dp_display->internal_hpd = true; mutex_unlock(&dp->event_mutex); } -void dp_bridge_hpd_disable(struct drm_bridge *bridge) +void msm_dp_bridge_hpd_disable(struct drm_bridge *bridge) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge); - struct msm_dp *dp_display = dp_bridge->dp_display; - struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display); + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge); + struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); mutex_lock(&dp->event_mutex); /* disable HPD interrupts */ - dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); - dp_catalog_ctrl_hpd_disable(dp->catalog); + msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); + msm_dp_catalog_ctrl_hpd_disable(dp->catalog); - dp_display->internal_hpd = false; + msm_dp_display->internal_hpd = false; - pm_runtime_put_sync(&dp_display->pdev->dev); + pm_runtime_put_sync(&msm_dp_display->pdev->dev); mutex_unlock(&dp->event_mutex); } -void dp_bridge_hpd_notify(struct drm_bridge *bridge, +void msm_dp_bridge_hpd_notify(struct drm_bridge *bridge, enum drm_connector_status status) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge); - struct msm_dp *dp_display = dp_bridge->dp_display; - struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display); + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge); + struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); /* Without next_bridge interrupts are handled by the DP core directly */ - if (dp_display->internal_hpd) + if (msm_dp_display->internal_hpd) return; - if (!dp_display->link_ready && status == connector_status_connected) - dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); - else if (dp_display->link_ready &&
status == connector_status_disconnected) - dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); + if (!msm_dp_display->link_ready && status == connector_status_connected) + msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); + else if (msm_dp_display->link_ready && status == connector_status_disconnected) + msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); } diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h index ec7fa67e0569..ecbc2d92f546 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.h +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -27,18 +27,18 @@ struct msm_dp { hdmi_codec_plugged_cb plugged_cb; - struct dp_audio *dp_audio; + struct msm_dp_audio *msm_dp_audio; bool psr_supported; }; -int dp_display_set_plugged_cb(struct msm_dp *dp_display, +int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display, hdmi_codec_plugged_cb fn, struct device *codec_dev); -int dp_display_get_modes(struct msm_dp *dp_display); -bool dp_display_check_video_test(struct msm_dp *dp_display); -int dp_display_get_test_bpp(struct msm_dp *dp_display); -void dp_display_signal_audio_start(struct msm_dp *dp_display); -void dp_display_signal_audio_complete(struct msm_dp *dp_display); -void dp_display_set_psr(struct msm_dp *dp, bool enter); -void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *dentry, bool is_edp); +int msm_dp_display_get_modes(struct msm_dp *msm_dp_display); +bool msm_dp_display_check_video_test(struct msm_dp *msm_dp_display); +int msm_dp_display_get_test_bpp(struct msm_dp *msm_dp_display); +void msm_dp_display_signal_audio_start(struct msm_dp *msm_dp_display); +void msm_dp_display_signal_audio_complete(struct msm_dp *msm_dp_display); +void msm_dp_display_set_psr(struct msm_dp *dp, bool enter); +void msm_dp_display_debugfs_init(struct msm_dp *msm_dp_display, struct dentry *dentry, bool is_edp); #endif /* _DP_DISPLAY_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index 1b9be5bd97f1..d3e241ea6941 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -14,15 +14,15 @@ #include "dp_drm.h" /** - * dp_bridge_detect - callback to determine if connector is connected + * msm_dp_bridge_detect - callback to determine if connector is connected * @bridge: Pointer to drm bridge structure * Returns: Bridge's 'is connected' status */ -static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge) { struct msm_dp *dp; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; drm_dbg_dp(dp->drm_dev, "link_ready = %s\n", (dp->link_ready) ? "true" : "false"); @@ -31,14 +31,14 @@ static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge) connector_status_disconnected; } -static int dp_bridge_atomic_check(struct drm_bridge *bridge, +static int msm_dp_bridge_atomic_check(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct msm_dp *dp; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; drm_dbg_dp(dp->drm_dev, "link_ready = %s\n", (dp->link_ready) ? 
"true" : "false"); @@ -62,12 +62,12 @@ static int dp_bridge_atomic_check(struct drm_bridge *bridge, /** - * dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add() + * msm_dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add() * @bridge: Poiner to drm bridge * @connector: Pointer to drm connector structure * Returns: Number of modes added */ -static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector) +static int msm_dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector) { int rc = 0; struct msm_dp *dp; @@ -75,11 +75,11 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector * if (!connector) return 0; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; /* pluggable case assumes EDID is read when HPD */ if (dp->link_ready) { - rc = dp_display_get_modes(dp); + rc = msm_dp_display_get_modes(dp); if (rc <= 0) { DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc); return rc; @@ -90,37 +90,37 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector * return rc; } -static void dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) +static void msm_dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) { - struct msm_dp *dp = to_dp_bridge(bridge)->dp_display; + struct msm_dp *dp = to_dp_bridge(bridge)->msm_dp_display; - dp_display_debugfs_init(dp, root, false); + msm_dp_display_debugfs_init(dp, root, false); } -static const struct drm_bridge_funcs dp_bridge_ops = { +static const struct drm_bridge_funcs msm_dp_bridge_ops = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, - .atomic_enable = dp_bridge_atomic_enable, - .atomic_disable = dp_bridge_atomic_disable, - .atomic_post_disable = dp_bridge_atomic_post_disable, - .mode_set = dp_bridge_mode_set, - .mode_valid = dp_bridge_mode_valid, - .get_modes = dp_bridge_get_modes, - .detect = dp_bridge_detect, - .atomic_check = dp_bridge_atomic_check, - .hpd_enable = dp_bridge_hpd_enable, - .hpd_disable = dp_bridge_hpd_disable, - .hpd_notify = dp_bridge_hpd_notify, - .debugfs_init = dp_bridge_debugfs_init, + .atomic_enable = msm_dp_bridge_atomic_enable, + .atomic_disable = msm_dp_bridge_atomic_disable, + .atomic_post_disable = msm_dp_bridge_atomic_post_disable, + .mode_set = msm_dp_bridge_mode_set, + .mode_valid = msm_dp_bridge_mode_valid, + .get_modes = msm_dp_bridge_get_modes, + .detect = msm_dp_bridge_detect, + .atomic_check = msm_dp_bridge_atomic_check, + .hpd_enable = msm_dp_bridge_hpd_enable, + .hpd_disable = msm_dp_bridge_hpd_disable, + .hpd_notify = msm_dp_bridge_hpd_notify, + .debugfs_init = msm_dp_bridge_debugfs_init, }; -static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge, +static int msm_edp_bridge_atomic_check(struct drm_bridge *drm_bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct msm_dp *dp = to_dp_bridge(drm_bridge)->dp_display; + struct msm_dp *dp = to_dp_bridge(drm_bridge)->msm_dp_display; if (WARN_ON(!conn_state)) return -ENODEV; @@ -136,18 +136,18 @@ static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge, return 0; } -static void edp_bridge_atomic_enable(struct drm_bridge *drm_bridge, +static void msm_edp_bridge_atomic_enable(struct drm_bridge *drm_bridge, struct drm_bridge_state 
*old_bridge_state) { struct drm_atomic_state *atomic_state = old_bridge_state->base.state; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; /* * Check the old state of the crtc to determine if the panel - * was put into psr state previously by the edp_bridge_atomic_disable. + * was put into psr state previously by the msm_edp_bridge_atomic_disable. * If the panel is in psr, just exit psr state and skip the full * bridge enable sequence. */ @@ -159,21 +159,21 @@ static void edp_bridge_atomic_enable(struct drm_bridge *drm_bridge, old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc); if (old_crtc_state && old_crtc_state->self_refresh_active) { - dp_display_set_psr(dp, false); + msm_dp_display_set_psr(dp, false); return; } - dp_bridge_atomic_enable(drm_bridge, old_bridge_state); + msm_dp_bridge_atomic_enable(drm_bridge, old_bridge_state); } -static void edp_bridge_atomic_disable(struct drm_bridge *drm_bridge, +static void msm_edp_bridge_atomic_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { struct drm_atomic_state *atomic_state = old_bridge_state->base.state; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state = NULL, *old_crtc_state = NULL; - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; crtc = drm_atomic_get_old_crtc_for_encoder(atomic_state, drm_bridge->encoder); @@ -194,24 +194,24 @@ static void edp_bridge_atomic_disable(struct drm_bridge *drm_bridge, * If old crtc state is active, then this is a display disable * call while the sink is in psr state. So, exit psr here. * The eDP controller will be disabled in the - * edp_bridge_atomic_post_disable function. + * msm_edp_bridge_atomic_post_disable function. * * We observed sink is stuck in self refresh if psr exit is skipped * when display disable occurs while the sink is in psr state. */ if (new_crtc_state->self_refresh_active) { - dp_display_set_psr(dp, true); + msm_dp_display_set_psr(dp, true); return; } else if (old_crtc_state->self_refresh_active) { - dp_display_set_psr(dp, false); + msm_dp_display_set_psr(dp, false); return; } out: - dp_bridge_atomic_disable(drm_bridge, old_bridge_state); + msm_dp_bridge_atomic_disable(drm_bridge, old_bridge_state); } -static void edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, +static void msm_edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { struct drm_atomic_state *atomic_state = old_bridge_state->base.state; @@ -228,29 +228,29 @@ static void edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, return; /* - * Self refresh mode is already set in edp_bridge_atomic_disable. + * Self refresh mode is already set in msm_edp_bridge_atomic_disable. 
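 * (editor's summary of the PSR flow across these three eDP hooks, as read
 * from this file: atomic_enable exits PSR when the old crtc state was in
 * self-refresh and skips the full enable; atomic_disable enters PSR when
 * the new crtc state is self-refresh, or exits it when only the old one
 * was; and this post_disable returns early below so the controller stays
 * up while the panel self-refreshes.)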
*/ if (new_crtc_state->self_refresh_active) return; - dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state); + msm_dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state); } /** - * edp_bridge_mode_valid - callback to determine if specified mode is valid + * msm_edp_bridge_mode_valid - callback to determine if specified mode is valid * @bridge: Pointer to drm bridge structure * @info: display info * @mode: Pointer to drm mode structure * Returns: Validity status for specified mode */ -static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge, +static enum drm_mode_status msm_edp_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct msm_dp *dp; int mode_pclk_khz = mode->clock; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; if (!dp || !mode_pclk_khz || !dp->connector) { DRM_ERROR("invalid params\n"); @@ -268,42 +268,43 @@ static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge, return MODE_OK; } -static void edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) +static void msm_edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) { - struct msm_dp *dp = to_dp_bridge(bridge)->dp_display; + struct msm_dp *dp = to_dp_bridge(bridge)->msm_dp_display; - dp_display_debugfs_init(dp, root, true); + msm_dp_display_debugfs_init(dp, root, true); } -static const struct drm_bridge_funcs edp_bridge_ops = { - .atomic_enable = edp_bridge_atomic_enable, - .atomic_disable = edp_bridge_atomic_disable, - .atomic_post_disable = edp_bridge_atomic_post_disable, - .mode_set = dp_bridge_mode_set, - .mode_valid = edp_bridge_mode_valid, +static const struct drm_bridge_funcs msm_edp_bridge_ops = { + .atomic_enable = msm_edp_bridge_atomic_enable, + .atomic_disable = msm_edp_bridge_atomic_disable, + .atomic_post_disable = msm_edp_bridge_atomic_post_disable, + .mode_set = msm_dp_bridge_mode_set, + .mode_valid = msm_edp_bridge_mode_valid, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, - .atomic_check = edp_bridge_atomic_check, - .debugfs_init = edp_bridge_debugfs_init, + .atomic_check = msm_edp_bridge_atomic_check, + .debugfs_init = msm_edp_bridge_debugfs_init, }; -int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, - struct drm_encoder *encoder) +int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev, + struct drm_encoder *encoder, bool yuv_supported) { int rc; - struct msm_dp_bridge *dp_bridge; + struct msm_dp_bridge *msm_dp_bridge; struct drm_bridge *bridge; - dp_bridge = devm_kzalloc(dev->dev, sizeof(*dp_bridge), GFP_KERNEL); - if (!dp_bridge) + msm_dp_bridge = devm_kzalloc(dev->dev, sizeof(*msm_dp_bridge), GFP_KERNEL); + if (!msm_dp_bridge) return -ENOMEM; - dp_bridge->dp_display = dp_display; + msm_dp_bridge->msm_dp_display = msm_dp_display; - bridge = &dp_bridge->bridge; - bridge->funcs = dp_display->is_edp ? &edp_bridge_ops : &dp_bridge_ops; - bridge->type = dp_display->connector_type; + bridge = &msm_dp_bridge->bridge; + bridge->funcs = msm_dp_display->is_edp ? &msm_edp_bridge_ops : &msm_dp_bridge_ops; + bridge->type = msm_dp_display->connector_type; + bridge->ycbcr_420_allowed = yuv_supported; /* * Many ops only make sense for DP. Why? 
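 * (editor's aside on the one functional change in this hunk: YCbCr 4:2:0
 * support is now advertised via bridge->ycbcr_420_allowed above rather
 * than being set directly on the connector in msm_dp_drm_connector_init();
 * the drm_bridge_connector glue is then expected to derive the connector's
 * flag from the bridge chain.)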
@@ -316,7 +317,7 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, * allows the panel driver to properly power itself on to read the * modes. */ - if (!dp_display->is_edp) { + if (!msm_dp_display->is_edp) { bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_HPD | @@ -337,9 +338,9 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, return rc; } - if (dp_display->next_bridge) { + if (msm_dp_display->next_bridge) { rc = drm_bridge_attach(encoder, - dp_display->next_bridge, bridge, + msm_dp_display->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (rc < 0) { DRM_ERROR("failed to attach panel bridge: %d\n", rc); @@ -351,21 +352,18 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, } /* connector initialization */ -struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder, - bool yuv_supported) +struct drm_connector *msm_dp_drm_connector_init(struct msm_dp *msm_dp_display, + struct drm_encoder *encoder) { struct drm_connector *connector = NULL; - connector = drm_bridge_connector_init(dp_display->drm_dev, encoder); + connector = drm_bridge_connector_init(msm_dp_display->drm_dev, encoder); if (IS_ERR(connector)) return connector; - if (!dp_display->is_edp) + if (!msm_dp_display->is_edp) drm_connector_attach_dp_subconnector_property(connector); - if (yuv_supported) - connector->ycbcr_420_allowed = true; - drm_connector_attach_encoder(connector, encoder); return connector; diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h index 45e57ac25a4d..8eae2f74839f 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.h +++ b/drivers/gpu/drm/msm/dp/dp_drm.h @@ -14,31 +14,32 @@ struct msm_dp_bridge { struct drm_bridge bridge; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; }; #define to_dp_bridge(x) container_of((x), struct msm_dp_bridge, bridge) -struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder, - bool yuv_supported); -int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, - struct drm_encoder *encoder); +struct drm_connector *msm_dp_drm_connector_init(struct msm_dp *msm_dp_display, + struct drm_encoder *encoder); +int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev, + struct drm_encoder *encoder, + bool yuv_supported); -void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state); -void dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state); -void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state); -enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, +enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode); -void dp_bridge_mode_set(struct drm_bridge *drm_bridge, +void msm_dp_bridge_mode_set(struct drm_bridge *drm_bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode); -void dp_bridge_hpd_enable(struct drm_bridge *bridge); -void dp_bridge_hpd_disable(struct drm_bridge *bridge); -void dp_bridge_hpd_notify(struct drm_bridge *bridge, +void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge); +void 
msm_dp_bridge_hpd_disable(struct drm_bridge *bridge); +void msm_dp_bridge_hpd_notify(struct drm_bridge *bridge, enum drm_connector_status status); #endif /* _DP_DRM_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index d8967615d84d..1a1fbb2d7d4f 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -28,25 +28,25 @@ enum audio_pattern_type { AUDIO_TEST_PATTERN_SAWTOOTH = 0x01, }; -struct dp_link_request { +struct msm_dp_link_request { u32 test_requested; u32 test_link_rate; u32 test_lane_count; }; -struct dp_link_private { +struct msm_dp_link_private { u32 prev_sink_count; struct drm_device *drm_dev; struct drm_dp_aux *aux; - struct dp_link dp_link; + struct msm_dp_link msm_dp_link; - struct dp_link_request request; + struct msm_dp_link_request request; struct mutex psm_mutex; u8 link_status[DP_LINK_STATUS_SIZE]; }; -static int dp_aux_link_power_up(struct drm_dp_aux *aux, - struct dp_link_info *link) +static int msm_dp_aux_link_power_up(struct drm_dp_aux *aux, + struct msm_dp_link_info *link) { u8 value; ssize_t len; @@ -73,8 +73,8 @@ static int dp_aux_link_power_up(struct drm_dp_aux *aux, return 0; } -static int dp_aux_link_power_down(struct drm_dp_aux *aux, - struct dp_link_info *link) +static int msm_dp_aux_link_power_down(struct drm_dp_aux *aux, + struct msm_dp_link_info *link) { u8 value; int err; @@ -96,7 +96,7 @@ static int dp_aux_link_power_down(struct drm_dp_aux *aux, return 0; } -static int dp_link_get_period(struct dp_link_private *link, int const addr) +static int msm_dp_link_get_period(struct msm_dp_link_private *link, int const addr) { int ret = 0; u8 data; @@ -122,19 +122,19 @@ exit: return ret; } -static int dp_link_parse_audio_channel_period(struct dp_link_private *link) +static int msm_dp_link_parse_audio_channel_period(struct msm_dp_link_private *link) { int ret = 0; - struct dp_link_test_audio *req = &link->dp_link.test_audio; + struct msm_dp_link_test_audio *req = &link->msm_dp_link.test_audio; - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_1 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_1 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2); if (ret == -EINVAL) goto exit; @@ -142,42 +142,42 @@ static int dp_link_parse_audio_channel_period(struct dp_link_private *link) drm_dbg_dp(link->drm_dev, "test_audio_period_ch_2 = 0x%x\n", ret); /* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */ - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_3 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_3 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_4 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_4 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_5 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_5 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6); if (ret == -EINVAL) 
goto exit; req->test_audio_period_ch_6 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_6 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_7 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_7 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8); if (ret == -EINVAL) goto exit; @@ -187,7 +187,7 @@ exit: return ret; } -static int dp_link_parse_audio_pattern_type(struct dp_link_private *link) +static int msm_dp_link_parse_audio_pattern_type(struct msm_dp_link_private *link) { int ret = 0; u8 data; @@ -208,13 +208,13 @@ static int dp_link_parse_audio_pattern_type(struct dp_link_private *link) goto exit; } - link->dp_link.test_audio.test_audio_pattern_type = data; + link->msm_dp_link.test_audio.test_audio_pattern_type = data; drm_dbg_dp(link->drm_dev, "audio pattern type = 0x%x\n", data); exit: return ret; } -static int dp_link_parse_audio_mode(struct dp_link_private *link) +static int msm_dp_link_parse_audio_mode(struct msm_dp_link_private *link) { int ret = 0; u8 data; @@ -248,8 +248,8 @@ static int dp_link_parse_audio_mode(struct dp_link_private *link) goto exit; } - link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate; - link->dp_link.test_audio.test_audio_channel_count = channel_count; + link->msm_dp_link.test_audio.test_audio_sampling_rate = sampling_rate; + link->msm_dp_link.test_audio.test_audio_channel_count = channel_count; drm_dbg_dp(link->drm_dev, "sampling_rate = 0x%x, channel_count = 0x%x\n", sampling_rate, channel_count); @@ -257,25 +257,25 @@ exit: return ret; } -static int dp_link_parse_audio_pattern_params(struct dp_link_private *link) +static int msm_dp_link_parse_audio_pattern_params(struct msm_dp_link_private *link) { int ret = 0; - ret = dp_link_parse_audio_mode(link); + ret = msm_dp_link_parse_audio_mode(link); if (ret) goto exit; - ret = dp_link_parse_audio_pattern_type(link); + ret = msm_dp_link_parse_audio_pattern_type(link); if (ret) goto exit; - ret = dp_link_parse_audio_channel_period(link); + ret = msm_dp_link_parse_audio_channel_period(link); exit: return ret; } -static bool dp_link_is_video_pattern_valid(u32 pattern) +static bool msm_dp_link_is_video_pattern_valid(u32 pattern) { switch (pattern) { case DP_NO_TEST_PATTERN: @@ -289,12 +289,12 @@ static bool dp_link_is_video_pattern_valid(u32 pattern) } /** - * dp_link_is_bit_depth_valid() - validates the bit depth requested + * msm_dp_link_is_bit_depth_valid() - validates the bit depth requested * @tbd: bit depth requested by the sink * * Returns true if the requested bit depth is supported. 
*/ -static bool dp_link_is_bit_depth_valid(u32 tbd) +static bool msm_dp_link_is_bit_depth_valid(u32 tbd) { /* DP_TEST_VIDEO_PATTERN_NONE is treated as invalid */ switch (tbd) { @@ -307,7 +307,7 @@ static bool dp_link_is_bit_depth_valid(u32 tbd) } } -static int dp_link_parse_timing_params1(struct dp_link_private *link, +static int msm_dp_link_parse_timing_params1(struct msm_dp_link_private *link, int addr, int len, u32 *val) { u8 bp[2]; @@ -328,7 +328,7 @@ static int dp_link_parse_timing_params1(struct dp_link_private *link, return 0; } -static int dp_link_parse_timing_params2(struct dp_link_private *link, +static int msm_dp_link_parse_timing_params2(struct msm_dp_link_private *link, int addr, int len, u32 *val1, u32 *val2) { @@ -351,7 +351,7 @@ static int dp_link_parse_timing_params2(struct dp_link_private *link, return 0; } -static int dp_link_parse_timing_params3(struct dp_link_private *link, +static int msm_dp_link_parse_timing_params3(struct msm_dp_link_private *link, int addr, u32 *val) { u8 bp; @@ -369,13 +369,13 @@ static int dp_link_parse_timing_params3(struct dp_link_private *link, } /** - * dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD + * msm_dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD * @link: Display Port Driver data * * Returns 0 if it successfully parses the video link pattern and the link * bit depth requested by the sink and, and if the values parsed are valid. */ -static int dp_link_parse_video_pattern_params(struct dp_link_private *link) +static int msm_dp_link_parse_video_pattern_params(struct msm_dp_link_private *link) { int ret = 0; ssize_t rlen; @@ -388,13 +388,13 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link) return rlen; } - if (!dp_link_is_video_pattern_valid(bp)) { + if (!msm_dp_link_is_video_pattern_valid(bp)) { DRM_ERROR("invalid link video pattern = 0x%x\n", bp); ret = -EINVAL; return ret; } - link->dp_link.test_video.test_video_pattern = bp; + link->msm_dp_link.test_video.test_video_pattern = bp; /* Read the requested color bit depth and dynamic range (Byte 0x232) */ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_MISC0, &bp); @@ -404,88 +404,88 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link) } /* Dynamic Range */ - link->dp_link.test_video.test_dyn_range = + link->msm_dp_link.test_video.test_dyn_range = (bp & DP_TEST_DYNAMIC_RANGE_CEA); /* Color bit depth */ bp &= DP_TEST_BIT_DEPTH_MASK; - if (!dp_link_is_bit_depth_valid(bp)) { + if (!msm_dp_link_is_bit_depth_valid(bp)) { DRM_ERROR("invalid link bit depth = 0x%x\n", bp); ret = -EINVAL; return ret; } - link->dp_link.test_video.test_bit_depth = bp; + link->msm_dp_link.test_video.test_bit_depth = bp; /* resolution timing params */ - ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2, - &link->dp_link.test_video.test_h_total); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2, + &link->msm_dp_link.test_video.test_h_total); if (ret) { DRM_ERROR("failed to parse test_htotal(DP_TEST_H_TOTAL_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2, - &link->dp_link.test_video.test_v_total); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2, + &link->msm_dp_link.test_video.test_v_total); if (ret) { DRM_ERROR("failed to parse test_v_total(DP_TEST_V_TOTAL_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2, - &link->dp_link.test_video.test_h_start); + ret = 
msm_dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2, + &link->msm_dp_link.test_video.test_h_start); if (ret) { DRM_ERROR("failed to parse test_h_start(DP_TEST_H_START_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2, - &link->dp_link.test_video.test_v_start); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2, + &link->msm_dp_link.test_video.test_v_start); if (ret) { DRM_ERROR("failed to parse test_v_start(DP_TEST_V_START_HI)\n"); return ret; } - ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2, - &link->dp_link.test_video.test_hsync_pol, - &link->dp_link.test_video.test_hsync_width); + ret = msm_dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2, + &link->msm_dp_link.test_video.test_hsync_pol, + &link->msm_dp_link.test_video.test_hsync_width); if (ret) { DRM_ERROR("failed to parse (DP_TEST_HSYNC_HI)\n"); return ret; } - ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2, - &link->dp_link.test_video.test_vsync_pol, - &link->dp_link.test_video.test_vsync_width); + ret = msm_dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2, + &link->msm_dp_link.test_video.test_vsync_pol, + &link->msm_dp_link.test_video.test_vsync_width); if (ret) { DRM_ERROR("failed to parse (DP_TEST_VSYNC_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2, - &link->dp_link.test_video.test_h_width); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2, + &link->msm_dp_link.test_video.test_h_width); if (ret) { DRM_ERROR("failed to parse test_h_width(DP_TEST_H_WIDTH_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2, - &link->dp_link.test_video.test_v_height); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2, + &link->msm_dp_link.test_video.test_v_height); if (ret) { DRM_ERROR("failed to parse test_v_height\n"); return ret; } - ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1, - &link->dp_link.test_video.test_rr_d); - link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR; + ret = msm_dp_link_parse_timing_params3(link, DP_TEST_MISC1, + &link->msm_dp_link.test_video.test_rr_d); + link->msm_dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR; if (ret) { DRM_ERROR("failed to parse test_rr_d (DP_TEST_MISC1)\n"); return ret; } - ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR, - &link->dp_link.test_video.test_rr_n); + ret = msm_dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR, + &link->msm_dp_link.test_video.test_rr_n); if (ret) { DRM_ERROR("failed to parse test_rr_n\n"); return ret; @@ -505,34 +505,34 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link) "TEST_V_HEIGHT = %d\n" "TEST_REFRESH_DENOMINATOR = %d\n" "TEST_REFRESH_NUMERATOR = %d\n", - link->dp_link.test_video.test_video_pattern, - link->dp_link.test_video.test_dyn_range, - link->dp_link.test_video.test_bit_depth, - link->dp_link.test_video.test_h_total, - link->dp_link.test_video.test_v_total, - link->dp_link.test_video.test_h_start, - link->dp_link.test_video.test_v_start, - link->dp_link.test_video.test_hsync_pol, - link->dp_link.test_video.test_hsync_width, - link->dp_link.test_video.test_vsync_pol, - link->dp_link.test_video.test_vsync_width, - link->dp_link.test_video.test_h_width, - link->dp_link.test_video.test_v_height, - link->dp_link.test_video.test_rr_d, - link->dp_link.test_video.test_rr_n); + link->msm_dp_link.test_video.test_video_pattern, + 
link->msm_dp_link.test_video.test_dyn_range, + link->msm_dp_link.test_video.test_bit_depth, + link->msm_dp_link.test_video.test_h_total, + link->msm_dp_link.test_video.test_v_total, + link->msm_dp_link.test_video.test_h_start, + link->msm_dp_link.test_video.test_v_start, + link->msm_dp_link.test_video.test_hsync_pol, + link->msm_dp_link.test_video.test_hsync_width, + link->msm_dp_link.test_video.test_vsync_pol, + link->msm_dp_link.test_video.test_vsync_width, + link->msm_dp_link.test_video.test_h_width, + link->msm_dp_link.test_video.test_v_height, + link->msm_dp_link.test_video.test_rr_d, + link->msm_dp_link.test_video.test_rr_n); return ret; } /** - * dp_link_parse_link_training_params() - parses link training parameters from + * msm_dp_link_parse_link_training_params() - parses link training parameters from * DPCD * @link: Display Port Driver data * * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane * count (Byte 0x220), and if these values parse are valid. */ -static int dp_link_parse_link_training_params(struct dp_link_private *link) +static int msm_dp_link_parse_link_training_params(struct msm_dp_link_private *link) { u8 bp; ssize_t rlen; @@ -571,13 +571,13 @@ static int dp_link_parse_link_training_params(struct dp_link_private *link) } /** - * dp_link_parse_phy_test_params() - parses the phy link parameters + * msm_dp_link_parse_phy_test_params() - parses the phy link parameters * @link: Display Port Driver data * * Parses the DPCD (Byte 0x248) for the DP PHY link pattern that is being * requested. */ -static int dp_link_parse_phy_test_params(struct dp_link_private *link) +static int msm_dp_link_parse_phy_test_params(struct msm_dp_link_private *link) { u8 data; ssize_t rlen; @@ -589,7 +589,7 @@ static int dp_link_parse_phy_test_params(struct dp_link_private *link) return rlen; } - link->dp_link.phy_params.phy_test_pattern_sel = data & 0x07; + link->msm_dp_link.phy_params.phy_test_pattern_sel = data & 0x07; drm_dbg_dp(link->drm_dev, "phy_test_pattern_sel = 0x%x\n", data); @@ -608,12 +608,12 @@ static int dp_link_parse_phy_test_params(struct dp_link_private *link) } /** - * dp_link_is_video_audio_test_requested() - checks for audio/video link request + * msm_dp_link_is_video_audio_test_requested() - checks for audio/video link request * @link: link requested by the sink * * Returns true if the requested link is a permitted audio/video link. */ -static bool dp_link_is_video_audio_test_requested(u32 link) +static bool msm_dp_link_is_video_audio_test_requested(u32 link) { u8 video_audio_test = (DP_TEST_LINK_VIDEO_PATTERN | DP_TEST_LINK_AUDIO_PATTERN | @@ -624,13 +624,13 @@ static bool dp_link_is_video_audio_test_requested(u32 link) } /** - * dp_link_parse_request() - parses link request parameters from sink + * msm_dp_link_parse_request() - parses link request parameters from sink * @link: Display Port Driver data * * Parses the DPCD to check if an automated link is requested (Byte 0x201), * and what type of link automation is being requested (Byte 0x218). 
*/ -static int dp_link_parse_request(struct dp_link_private *link) +static int msm_dp_link_parse_request(struct msm_dp_link_private *link) { int ret = 0; u8 data; @@ -672,27 +672,27 @@ static int dp_link_parse_request(struct dp_link_private *link) drm_dbg_dp(link->drm_dev, "Test:(0x%x) requested\n", data); link->request.test_requested = data; if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) { - ret = dp_link_parse_phy_test_params(link); + ret = msm_dp_link_parse_phy_test_params(link); if (ret) goto end; - ret = dp_link_parse_link_training_params(link); + ret = msm_dp_link_parse_link_training_params(link); if (ret) goto end; } if (link->request.test_requested == DP_TEST_LINK_TRAINING) { - ret = dp_link_parse_link_training_params(link); + ret = msm_dp_link_parse_link_training_params(link); if (ret) goto end; } - if (dp_link_is_video_audio_test_requested( + if (msm_dp_link_is_video_audio_test_requested( link->request.test_requested)) { - ret = dp_link_parse_video_pattern_params(link); + ret = msm_dp_link_parse_video_pattern_params(link); if (ret) goto end; - ret = dp_link_parse_audio_pattern_params(link); + ret = msm_dp_link_parse_audio_pattern_params(link); } end: /* @@ -700,29 +700,29 @@ end: * a DP_TEST_NAK. */ if (ret) { - link->dp_link.test_response = DP_TEST_NAK; + link->msm_dp_link.test_response = DP_TEST_NAK; } else { if (link->request.test_requested != DP_TEST_LINK_EDID_READ) - link->dp_link.test_response = DP_TEST_ACK; + link->msm_dp_link.test_response = DP_TEST_ACK; else - link->dp_link.test_response = + link->msm_dp_link.test_response = DP_TEST_EDID_CHECKSUM_WRITE; } return ret; } -static int dp_link_parse_sink_status_field(struct dp_link_private *link) +static int msm_dp_link_parse_sink_status_field(struct msm_dp_link_private *link) { int len; - link->prev_sink_count = link->dp_link.sink_count; + link->prev_sink_count = link->msm_dp_link.sink_count; len = drm_dp_read_sink_count(link->aux); if (len < 0) { DRM_ERROR("DP parse sink count failed\n"); return len; } - link->dp_link.sink_count = len; + link->msm_dp_link.sink_count = len; len = drm_dp_dpcd_read_link_status(link->aux, link->link_status); @@ -731,11 +731,11 @@ static int dp_link_parse_sink_status_field(struct dp_link_private *link) return len; } - return dp_link_parse_request(link); + return msm_dp_link_parse_request(link); } /** - * dp_link_process_link_training_request() - processes new training requests + * msm_dp_link_process_link_training_request() - processes new training requests * @link: Display Port link data * * This function will handle new link training requests that are initiated by @@ -745,7 +745,7 @@ static int dp_link_parse_sink_status_field(struct dp_link_private *link) * The function will return 0 if a link training request has been processed, * otherwise it will return -EINVAL. 
*/ -static int dp_link_process_link_training_request(struct dp_link_private *link) +static int msm_dp_link_process_link_training_request(struct msm_dp_link_private *link) { if (link->request.test_requested != DP_TEST_LINK_TRAINING) return -EINVAL; @@ -756,49 +756,49 @@ static int dp_link_process_link_training_request(struct dp_link_private *link) link->request.test_link_rate, link->request.test_lane_count); - link->dp_link.link_params.num_lanes = link->request.test_lane_count; - link->dp_link.link_params.rate = + link->msm_dp_link.link_params.num_lanes = link->request.test_lane_count; + link->msm_dp_link.link_params.rate = drm_dp_bw_code_to_link_rate(link->request.test_link_rate); return 0; } -bool dp_link_send_test_response(struct dp_link *dp_link) +bool msm_dp_link_send_test_response(struct msm_dp_link *msm_dp_link) { - struct dp_link_private *link = NULL; + struct msm_dp_link_private *link = NULL; int ret = 0; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return false; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_RESPONSE, - dp_link->test_response); + msm_dp_link->test_response); return ret == 1; } -int dp_link_psm_config(struct dp_link *dp_link, - struct dp_link_info *link_info, bool enable) +int msm_dp_link_psm_config(struct msm_dp_link *msm_dp_link, + struct msm_dp_link_info *link_info, bool enable) { - struct dp_link_private *link = NULL; + struct msm_dp_link_private *link = NULL; int ret = 0; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid params\n"); return -EINVAL; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); mutex_lock(&link->psm_mutex); if (enable) - ret = dp_aux_link_power_down(link->aux, link_info); + ret = msm_dp_aux_link_power_down(link->aux, link_info); else - ret = dp_aux_link_power_up(link->aux, link_info); + ret = msm_dp_aux_link_power_up(link->aux, link_info); if (ret) DRM_ERROR("Failed to %s low power mode\n", enable ? 
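
The DP_TEST_RESPONSE byte that msm_dp_link_send_test_response() writes back is chosen at the end of msm_dp_link_parse_request() above: NAK if any request failed to parse, TEST_EDID_CHECKSUM_WRITE for an EDID-read request, and ACK for everything else. A standalone sketch of that selection — the helper name is made up for illustration; the DPCD values are the standard ones from the DisplayPort spec (as in drm_dp.h):

#include <stdint.h>
#include <stdio.h>

#define DP_TEST_LINK_EDID_READ      0x04 /* request bit, DPCD 0x218 */
#define DP_TEST_ACK                 0x01 /* response codes, DPCD 0x260 */
#define DP_TEST_NAK                 0x02
#define DP_TEST_EDID_CHECKSUM_WRITE 0x04

static uint8_t pick_test_response(int parse_err, uint8_t test_requested)
{
	if (parse_err)
		return DP_TEST_NAK;
	if (test_requested == DP_TEST_LINK_EDID_READ)
		return DP_TEST_EDID_CHECKSUM_WRITE;
	return DP_TEST_ACK;
}

int main(void)
{
	printf("parse error -> 0x%02x\n", pick_test_response(1, 0));
	printf("EDID read   -> 0x%02x\n",
	       pick_test_response(0, DP_TEST_LINK_EDID_READ));
	printf("other test  -> 0x%02x\n", pick_test_response(0, 0x08));
	return 0;
}

For the EDID-read case the checksum itself still has to be delivered separately; msm_dp_panel_handle_sink_request() in the dp_panel.c hunks further down does exactly that, calling msm_dp_link_send_edid_checksum() before msm_dp_link_send_test_response().
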
@@ -808,24 +808,24 @@ int dp_link_psm_config(struct dp_link *dp_link, return ret; } -bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum) +bool msm_dp_link_send_edid_checksum(struct msm_dp_link *msm_dp_link, u8 checksum) { - struct dp_link_private *link = NULL; + struct msm_dp_link_private *link = NULL; int ret = 0; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return false; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_EDID_CHECKSUM, checksum); return ret == 1; } -static void dp_link_parse_vx_px(struct dp_link_private *link) +static void msm_dp_link_parse_vx_px(struct msm_dp_link_private *link) { drm_dbg_dp(link->drm_dev, "vx: 0=%d, 1=%d, 2=%d, 3=%d\n", drm_dp_get_adjust_request_voltage(link->link_status, 0), @@ -845,31 +845,31 @@ static void dp_link_parse_vx_px(struct dp_link_private *link) */ drm_dbg_dp(link->drm_dev, "Current: v_level = 0x%x, p_level = 0x%x\n", - link->dp_link.phy_params.v_level, - link->dp_link.phy_params.p_level); - link->dp_link.phy_params.v_level = + link->msm_dp_link.phy_params.v_level, + link->msm_dp_link.phy_params.p_level); + link->msm_dp_link.phy_params.v_level = drm_dp_get_adjust_request_voltage(link->link_status, 0); - link->dp_link.phy_params.p_level = + link->msm_dp_link.phy_params.p_level = drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0); - link->dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT; + link->msm_dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT; drm_dbg_dp(link->drm_dev, "Requested: v_level = 0x%x, p_level = 0x%x\n", - link->dp_link.phy_params.v_level, - link->dp_link.phy_params.p_level); + link->msm_dp_link.phy_params.v_level, + link->msm_dp_link.phy_params.p_level); } /** - * dp_link_process_phy_test_pattern_request() - process new phy link requests + * msm_dp_link_process_phy_test_pattern_request() - process new phy link requests * @link: Display Port Driver data * * This function will handle new phy link pattern requests that are initiated * by the sink. The function will return 0 if a phy link pattern has been * processed, otherwise it will return -EINVAL. 
*/ -static int dp_link_process_phy_test_pattern_request( - struct dp_link_private *link) +static int msm_dp_link_process_phy_test_pattern_request( + struct msm_dp_link_private *link) { if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) { drm_dbg_dp(link->drm_dev, "no phy test\n"); @@ -886,24 +886,24 @@ static int dp_link_process_phy_test_pattern_request( drm_dbg_dp(link->drm_dev, "Current: rate = 0x%x, lane count = 0x%x\n", - link->dp_link.link_params.rate, - link->dp_link.link_params.num_lanes); + link->msm_dp_link.link_params.rate, + link->msm_dp_link.link_params.num_lanes); drm_dbg_dp(link->drm_dev, "Requested: rate = 0x%x, lane count = 0x%x\n", link->request.test_link_rate, link->request.test_lane_count); - link->dp_link.link_params.num_lanes = link->request.test_lane_count; - link->dp_link.link_params.rate = + link->msm_dp_link.link_params.num_lanes = link->request.test_lane_count; + link->msm_dp_link.link_params.rate = drm_dp_bw_code_to_link_rate(link->request.test_link_rate); - dp_link_parse_vx_px(link); + msm_dp_link_parse_vx_px(link); return 0; } -static bool dp_link_read_psr_error_status(struct dp_link_private *link) +static bool msm_dp_link_read_psr_error_status(struct msm_dp_link_private *link) { u8 status; @@ -921,7 +921,7 @@ static bool dp_link_read_psr_error_status(struct dp_link_private *link) return true; } -static bool dp_link_psr_capability_changed(struct dp_link_private *link) +static bool msm_dp_link_psr_capability_changed(struct msm_dp_link_private *link) { u8 status; @@ -941,7 +941,7 @@ static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) } /** - * dp_link_process_link_status_update() - processes link status updates + * msm_dp_link_process_link_status_update() - processes link status updates * @link: Display Port link module data * * This function will check for changes in the link status, e.g. clock @@ -951,13 +951,13 @@ static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) * The function will return 0 if the a link status update has been processed, * otherwise it will return -EINVAL. */ -static int dp_link_process_link_status_update(struct dp_link_private *link) +static int msm_dp_link_process_link_status_update(struct msm_dp_link_private *link) { bool channel_eq_done = drm_dp_channel_eq_ok(link->link_status, - link->dp_link.link_params.num_lanes); + link->msm_dp_link.link_params.num_lanes); bool clock_recovery_done = drm_dp_clock_recovery_ok(link->link_status, - link->dp_link.link_params.num_lanes); + link->msm_dp_link.link_params.num_lanes); drm_dbg_dp(link->drm_dev, "channel_eq_done = %d, clock_recovery_done = %d\n", @@ -970,7 +970,7 @@ static int dp_link_process_link_status_update(struct dp_link_private *link) } /** - * dp_link_process_ds_port_status_change() - process port status changes + * msm_dp_link_process_ds_port_status_change() - process port status changes * @link: Display Port Driver data * * This function will handle downstream port updates that are initiated by @@ -980,122 +980,122 @@ static int dp_link_process_link_status_update(struct dp_link_private *link) * The function will return 0 if a downstream port update has been * processed, otherwise it will return -EINVAL. 
*/ -static int dp_link_process_ds_port_status_change(struct dp_link_private *link) +static int msm_dp_link_process_ds_port_status_change(struct msm_dp_link_private *link) { if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) & DP_DOWNSTREAM_PORT_STATUS_CHANGED) goto reset; - if (link->prev_sink_count == link->dp_link.sink_count) + if (link->prev_sink_count == link->msm_dp_link.sink_count) return -EINVAL; reset: /* reset prev_sink_count */ - link->prev_sink_count = link->dp_link.sink_count; + link->prev_sink_count = link->msm_dp_link.sink_count; return 0; } -static bool dp_link_is_video_pattern_requested(struct dp_link_private *link) +static bool msm_dp_link_is_video_pattern_requested(struct msm_dp_link_private *link) { return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN) && !(link->request.test_requested & DP_TEST_LINK_AUDIO_DISABLED_VIDEO); } -static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link) +static bool msm_dp_link_is_audio_pattern_requested(struct msm_dp_link_private *link) { return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN); } -static void dp_link_reset_data(struct dp_link_private *link) +static void msm_dp_link_reset_data(struct msm_dp_link_private *link) { - link->request = (const struct dp_link_request){ 0 }; - link->dp_link.test_video = (const struct dp_link_test_video){ 0 }; - link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN; - link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 }; - link->dp_link.phy_params.phy_test_pattern_sel = 0; - link->dp_link.sink_request = 0; - link->dp_link.test_response = 0; + link->request = (const struct msm_dp_link_request){ 0 }; + link->msm_dp_link.test_video = (const struct msm_dp_link_test_video){ 0 }; + link->msm_dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN; + link->msm_dp_link.test_audio = (const struct msm_dp_link_test_audio){ 0 }; + link->msm_dp_link.phy_params.phy_test_pattern_sel = 0; + link->msm_dp_link.sink_request = 0; + link->msm_dp_link.test_response = 0; } /** - * dp_link_process_request() - handle HPD IRQ transition to HIGH - * @dp_link: pointer to link module data + * msm_dp_link_process_request() - handle HPD IRQ transition to HIGH + * @msm_dp_link: pointer to link module data * * This function will handle the HPD IRQ state transitions from LOW to HIGH * (including cases when there are back to back HPD IRQ HIGH) indicating * the start of a new link training request or sink status update. 
*/ -int dp_link_process_request(struct dp_link *dp_link) +int msm_dp_link_process_request(struct msm_dp_link *msm_dp_link) { int ret = 0; - struct dp_link_private *link; + struct msm_dp_link_private *link; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return -EINVAL; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); - dp_link_reset_data(link); + msm_dp_link_reset_data(link); - ret = dp_link_parse_sink_status_field(link); + ret = msm_dp_link_parse_sink_status_field(link); if (ret) return ret; if (link->request.test_requested == DP_TEST_LINK_EDID_READ) { - dp_link->sink_request |= DP_TEST_LINK_EDID_READ; - } else if (!dp_link_process_ds_port_status_change(link)) { - dp_link->sink_request |= DS_PORT_STATUS_CHANGED; - } else if (!dp_link_process_link_training_request(link)) { - dp_link->sink_request |= DP_TEST_LINK_TRAINING; - } else if (!dp_link_process_phy_test_pattern_request(link)) { - dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN; - } else if (dp_link_read_psr_error_status(link)) { + msm_dp_link->sink_request |= DP_TEST_LINK_EDID_READ; + } else if (!msm_dp_link_process_ds_port_status_change(link)) { + msm_dp_link->sink_request |= DS_PORT_STATUS_CHANGED; + } else if (!msm_dp_link_process_link_training_request(link)) { + msm_dp_link->sink_request |= DP_TEST_LINK_TRAINING; + } else if (!msm_dp_link_process_phy_test_pattern_request(link)) { + msm_dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN; + } else if (msm_dp_link_read_psr_error_status(link)) { DRM_ERROR("PSR IRQ_HPD received\n"); - } else if (dp_link_psr_capability_changed(link)) { + } else if (msm_dp_link_psr_capability_changed(link)) { drm_dbg_dp(link->drm_dev, "PSR Capability changed\n"); } else { - ret = dp_link_process_link_status_update(link); + ret = msm_dp_link_process_link_status_update(link); if (!ret) { - dp_link->sink_request |= DP_LINK_STATUS_UPDATED; + msm_dp_link->sink_request |= DP_LINK_STATUS_UPDATED; } else { - if (dp_link_is_video_pattern_requested(link)) { + if (msm_dp_link_is_video_pattern_requested(link)) { ret = 0; - dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN; + msm_dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN; } - if (dp_link_is_audio_pattern_requested(link)) { - dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN; + if (msm_dp_link_is_audio_pattern_requested(link)) { + msm_dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN; ret = -EINVAL; } } } drm_dbg_dp(link->drm_dev, "sink request=%#x\n", - dp_link->sink_request); + msm_dp_link->sink_request); return ret; } -int dp_link_get_colorimetry_config(struct dp_link *dp_link) +int msm_dp_link_get_colorimetry_config(struct msm_dp_link *msm_dp_link) { u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB; - struct dp_link_private *link; + struct msm_dp_link_private *link; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return -EINVAL; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); /* * Unless a video pattern CTS test is ongoing, use RGB_VESA * Only RGB_VESA and RGB_CEA supported for now */ - if (dp_link_is_video_pattern_requested(link)) { - if (link->dp_link.test_video.test_dyn_range & + if (msm_dp_link_is_video_pattern_requested(link)) { + if (link->msm_dp_link.test_video.test_dyn_range & DP_TEST_DYNAMIC_RANGE_CEA) cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB; } @@ -1103,22 +1103,22 @@ int 
dp_link_get_colorimetry_config(struct dp_link *dp_link) return cc; } -int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) +int msm_dp_link_adjust_levels(struct msm_dp_link *msm_dp_link, u8 *link_status) { int i; u8 max_p_level; int v_max = 0, p_max = 0; - struct dp_link_private *link; + struct msm_dp_link_private *link; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return -EINVAL; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); /* use the max level across lanes */ - for (i = 0; i < dp_link->link_params.num_lanes; i++) { + for (i = 0; i < msm_dp_link->link_params.num_lanes; i++) { u8 data_v = drm_dp_get_adjust_request_voltage(link_status, i); u8 data_p = drm_dp_get_adjust_request_pre_emphasis(link_status, i); @@ -1131,56 +1131,56 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) p_max = data_p; } - dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT; - dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT; + msm_dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT; + msm_dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT; /** * Adjust the voltage swing and pre-emphasis level combination to within * the allowable range. */ - if (dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) { + if (msm_dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) { drm_dbg_dp(link->drm_dev, "Requested vSwingLevel=%d, change to %d\n", - dp_link->phy_params.v_level, + msm_dp_link->phy_params.v_level, DP_TRAIN_LEVEL_MAX); - dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX; + msm_dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX; } - if (dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) { + if (msm_dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) { drm_dbg_dp(link->drm_dev, "Requested preEmphasisLevel=%d, change to %d\n", - dp_link->phy_params.p_level, + msm_dp_link->phy_params.p_level, DP_TRAIN_LEVEL_MAX); - dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX; + msm_dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX; } - max_p_level = DP_TRAIN_LEVEL_MAX - dp_link->phy_params.v_level; - if (dp_link->phy_params.p_level > max_p_level) { + max_p_level = DP_TRAIN_LEVEL_MAX - msm_dp_link->phy_params.v_level; + if (msm_dp_link->phy_params.p_level > max_p_level) { drm_dbg_dp(link->drm_dev, "Requested preEmphasisLevel=%d, change to %d\n", - dp_link->phy_params.p_level, + msm_dp_link->phy_params.p_level, max_p_level); - dp_link->phy_params.p_level = max_p_level; + msm_dp_link->phy_params.p_level = max_p_level; } drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n", - dp_link->phy_params.v_level, dp_link->phy_params.p_level); + msm_dp_link->phy_params.v_level, msm_dp_link->phy_params.p_level); return 0; } -void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link) +void msm_dp_link_reset_phy_params_vx_px(struct msm_dp_link *msm_dp_link) { - dp_link->phy_params.v_level = 0; - dp_link->phy_params.p_level = 0; + msm_dp_link->phy_params.v_level = 0; + msm_dp_link->phy_params.p_level = 0; } -u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) +u32 msm_dp_link_get_test_bits_depth(struct msm_dp_link *msm_dp_link, u32 bpp) { u32 tbd; - struct dp_link_private *link; + struct msm_dp_link_private *link; - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); /* * Few simplistic rules and assumptions made here: @@ -1209,10 
+1209,10 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) return tbd; } -struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux) +struct msm_dp_link *msm_dp_link_get(struct device *dev, struct drm_dp_aux *aux) { - struct dp_link_private *link; - struct dp_link *dp_link; + struct msm_dp_link_private *link; + struct msm_dp_link *msm_dp_link; if (!dev || !aux) { DRM_ERROR("invalid input\n"); @@ -1226,7 +1226,7 @@ struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux) link->aux = aux; mutex_init(&link->psm_mutex); - dp_link = &link->dp_link; + msm_dp_link = &link->msm_dp_link; - return dp_link; + return msm_dp_link; } diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h index 5846337bb56f..8db5d5698a97 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.h +++ b/drivers/gpu/drm/msm/dp/dp_link.h @@ -12,7 +12,7 @@ #define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF #define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0) -struct dp_link_info { +struct msm_dp_link_info { unsigned char revision; unsigned int rate; unsigned int num_lanes; @@ -21,7 +21,7 @@ struct dp_link_info { #define DP_TRAIN_LEVEL_MAX 3 -struct dp_link_test_video { +struct msm_dp_link_test_video { u32 test_video_pattern; u32 test_bit_depth; u32 test_dyn_range; @@ -39,7 +39,7 @@ struct dp_link_test_video { u32 test_rr_n; }; -struct dp_link_test_audio { +struct msm_dp_link_test_audio { u32 test_audio_sampling_rate; u32 test_audio_channel_count; u32 test_audio_pattern_type; @@ -53,21 +53,21 @@ struct dp_link_test_audio { u32 test_audio_period_ch_8; }; -struct dp_link_phy_params { +struct msm_dp_link_phy_params { u32 phy_test_pattern_sel; u8 v_level; u8 p_level; }; -struct dp_link { +struct msm_dp_link { u32 sink_request; u32 test_response; u8 sink_count; - struct dp_link_test_video test_video; - struct dp_link_test_audio test_audio; - struct dp_link_phy_params phy_params; - struct dp_link_info link_params; + struct msm_dp_link_test_video test_video; + struct msm_dp_link_test_audio test_audio; + struct msm_dp_link_phy_params phy_params; + struct msm_dp_link_info link_params; }; /** @@ -78,7 +78,7 @@ struct dp_link { * git bit depth value. This function assumes that bit depth has * already been validated. 
*/ -static inline u32 dp_link_bit_depth_to_bpp(u32 tbd) +static inline u32 msm_dp_link_bit_depth_to_bpp(u32 tbd) { /* * Few simplistic rules and assumptions made here: @@ -99,22 +99,22 @@ static inline u32 dp_link_bit_depth_to_bpp(u32 tbd) } } -void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link); -u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp); -int dp_link_process_request(struct dp_link *dp_link); -int dp_link_get_colorimetry_config(struct dp_link *dp_link); -int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status); -bool dp_link_send_test_response(struct dp_link *dp_link); -int dp_link_psm_config(struct dp_link *dp_link, - struct dp_link_info *link_info, bool enable); -bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum); +void msm_dp_link_reset_phy_params_vx_px(struct msm_dp_link *msm_dp_link); +u32 msm_dp_link_get_test_bits_depth(struct msm_dp_link *msm_dp_link, u32 bpp); +int msm_dp_link_process_request(struct msm_dp_link *msm_dp_link); +int msm_dp_link_get_colorimetry_config(struct msm_dp_link *msm_dp_link); +int msm_dp_link_adjust_levels(struct msm_dp_link *msm_dp_link, u8 *link_status); +bool msm_dp_link_send_test_response(struct msm_dp_link *msm_dp_link); +int msm_dp_link_psm_config(struct msm_dp_link *msm_dp_link, + struct msm_dp_link_info *link_info, bool enable); +bool msm_dp_link_send_edid_checksum(struct msm_dp_link *msm_dp_link, u8 checksum); /** - * dp_link_get() - get the functionalities of dp test module + * msm_dp_link_get() - get the functionalities of dp test module * * - * return: a pointer to dp_link struct + * return: a pointer to msm_dp_link struct */ -struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux); +struct msm_dp_link *msm_dp_link_get(struct device *dev, struct drm_dp_aux *aux); #endif /* _DP_LINK_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 6ff6c9ef351f..5d7eaa31bf31 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -14,52 +14,52 @@ #define DP_MAX_NUM_DP_LANES 4 #define DP_LINK_RATE_HBR2 540000 /* kbytes */ -struct dp_panel_private { +struct msm_dp_panel_private { struct device *dev; struct drm_device *drm_dev; - struct dp_panel dp_panel; + struct msm_dp_panel msm_dp_panel; struct drm_dp_aux *aux; - struct dp_link *link; - struct dp_catalog *catalog; + struct msm_dp_link *link; + struct msm_dp_catalog *catalog; bool panel_on; }; -static void dp_panel_read_psr_cap(struct dp_panel_private *panel) +static void msm_dp_panel_read_psr_cap(struct msm_dp_panel_private *panel) { ssize_t rlen; - struct dp_panel *dp_panel; + struct msm_dp_panel *msm_dp_panel; - dp_panel = &panel->dp_panel; + msm_dp_panel = &panel->msm_dp_panel; /* edp sink */ - if (dp_panel->dpcd[DP_EDP_CONFIGURATION_CAP]) { + if (msm_dp_panel->dpcd[DP_EDP_CONFIGURATION_CAP]) { rlen = drm_dp_dpcd_read(panel->aux, DP_PSR_SUPPORT, - &dp_panel->psr_cap, sizeof(dp_panel->psr_cap)); - if (rlen == sizeof(dp_panel->psr_cap)) { + &msm_dp_panel->psr_cap, sizeof(msm_dp_panel->psr_cap)); + if (rlen == sizeof(msm_dp_panel->psr_cap)) { drm_dbg_dp(panel->drm_dev, "psr version: 0x%x, psr_cap: 0x%x\n", - dp_panel->psr_cap.version, - dp_panel->psr_cap.capabilities); + msm_dp_panel->psr_cap.version, + msm_dp_panel->psr_cap.capabilities); } else DRM_ERROR("failed to read psr info, rlen=%zd\n", rlen); } } -static int dp_panel_read_dpcd(struct dp_panel *dp_panel) +static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel) { int rc; - struct 
dp_panel_private *panel; - struct dp_link_info *link_info; + struct msm_dp_panel_private *panel; + struct msm_dp_link_info *link_info; u8 *dpcd, major, minor; - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); - dpcd = dp_panel->dpcd; + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); + dpcd = msm_dp_panel->dpcd; rc = drm_dp_read_dpcd_caps(panel->aux, dpcd); if (rc) return rc; - dp_panel->vsc_sdp_supported = drm_dp_vsc_sdp_supported(panel->aux, dpcd); - link_info = &dp_panel->link_info; + msm_dp_panel->vsc_sdp_supported = drm_dp_vsc_sdp_supported(panel->aux, dpcd); + link_info = &msm_dp_panel->link_info; link_info->revision = dpcd[DP_DPCD_REV]; major = (link_info->revision >> 4) & 0x0f; minor = link_info->revision & 0x0f; @@ -68,12 +68,12 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel) link_info->num_lanes = drm_dp_max_lane_count(dpcd); /* Limit data lanes from data-lanes of endpoint property of dtsi */ - if (link_info->num_lanes > dp_panel->max_dp_lanes) - link_info->num_lanes = dp_panel->max_dp_lanes; + if (link_info->num_lanes > msm_dp_panel->max_dp_lanes) + link_info->num_lanes = msm_dp_panel->max_dp_lanes; /* Limit link rate from link-frequencies of endpoint property of dtsi */ - if (link_info->rate > dp_panel->max_dp_link_rate) - link_info->rate = dp_panel->max_dp_link_rate; + if (link_info->rate > msm_dp_panel->max_dp_link_rate) + link_info->rate = msm_dp_panel->max_dp_link_rate; drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor); drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate); @@ -82,21 +82,21 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel) if (drm_dp_enhanced_frame_cap(dpcd)) link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; - dp_panel_read_psr_cap(panel); + msm_dp_panel_read_psr_cap(panel); return rc; } -static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel, +static u32 msm_dp_panel_get_supported_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_edid_bpp, u32 mode_pclk_khz) { - const struct dp_link_info *link_info; + const struct msm_dp_link_info *link_info; const u32 max_supported_bpp = 30, min_supported_bpp = 18; u32 bpp, data_rate_khz; bpp = min(mode_edid_bpp, max_supported_bpp); - link_info = &dp_panel->link_info; + link_info = &msm_dp_panel->link_info; data_rate_khz = link_info->num_lanes * link_info->rate * 8; do { @@ -108,39 +108,39 @@ static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel, return min_supported_bpp; } -int dp_panel_read_sink_caps(struct dp_panel *dp_panel, +int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel, struct drm_connector *connector) { int rc, bw_code; int count; - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; - if (!dp_panel || !connector) { + if (!msm_dp_panel || !connector) { DRM_ERROR("invalid input\n"); return -EINVAL; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); drm_dbg_dp(panel->drm_dev, "max_lanes=%d max_link_rate=%d\n", - dp_panel->max_dp_lanes, dp_panel->max_dp_link_rate); + msm_dp_panel->max_dp_lanes, msm_dp_panel->max_dp_link_rate); - rc = dp_panel_read_dpcd(dp_panel); + rc = msm_dp_panel_read_dpcd(msm_dp_panel); if (rc) { DRM_ERROR("read dpcd failed %d\n", rc); return rc; } - bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate); + bw_code = drm_dp_link_rate_to_bw_code(msm_dp_panel->link_info.rate); if (!is_link_rate_valid(bw_code) || - 
!is_lane_count_valid(dp_panel->link_info.num_lanes) || - (bw_code > dp_panel->max_bw_code)) { - DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate, - dp_panel->link_info.num_lanes); + !is_lane_count_valid(msm_dp_panel->link_info.num_lanes) || + (bw_code > msm_dp_panel->max_bw_code)) { + DRM_ERROR("Illegal link rate=%d lane=%d\n", msm_dp_panel->link_info.rate, + msm_dp_panel->link_info.num_lanes); return -EINVAL; } - if (drm_dp_is_branch(dp_panel->dpcd)) { + if (drm_dp_is_branch(msm_dp_panel->dpcd)) { count = drm_dp_read_sink_count(panel->aux); if (!count) { panel->link->sink_count = 0; @@ -148,21 +148,21 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel, } } - rc = drm_dp_read_downstream_info(panel->aux, dp_panel->dpcd, - dp_panel->downstream_ports); + rc = drm_dp_read_downstream_info(panel->aux, msm_dp_panel->dpcd, + msm_dp_panel->downstream_ports); if (rc) return rc; - drm_edid_free(dp_panel->drm_edid); + drm_edid_free(msm_dp_panel->drm_edid); - dp_panel->drm_edid = drm_edid_read_ddc(connector, &panel->aux->ddc); + msm_dp_panel->drm_edid = drm_edid_read_ddc(connector, &panel->aux->ddc); - drm_edid_connector_update(connector, dp_panel->drm_edid); + drm_edid_connector_update(connector, msm_dp_panel->drm_edid); - if (!dp_panel->drm_edid) { + if (!msm_dp_panel->drm_edid) { DRM_ERROR("panel edid read failed\n"); /* check edid read fail is due to unplug */ - if (!dp_catalog_link_is_connected(panel->catalog)) { + if (!msm_dp_catalog_link_is_connected(panel->catalog)) { rc = -ETIMEDOUT; goto end; } @@ -172,87 +172,87 @@ end: return rc; } -u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, +u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_edid_bpp, u32 mode_pclk_khz) { - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; u32 bpp; - if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) { + if (!msm_dp_panel || !mode_edid_bpp || !mode_pclk_khz) { DRM_ERROR("invalid input\n"); return 0; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); - if (dp_panel->video_test) - bpp = dp_link_bit_depth_to_bpp( + if (msm_dp_panel->video_test) + bpp = msm_dp_link_bit_depth_to_bpp( panel->link->test_video.test_bit_depth); else - bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp, + bpp = msm_dp_panel_get_supported_bpp(msm_dp_panel, mode_edid_bpp, mode_pclk_khz); return bpp; } -int dp_panel_get_modes(struct dp_panel *dp_panel, +int msm_dp_panel_get_modes(struct msm_dp_panel *msm_dp_panel, struct drm_connector *connector) { - if (!dp_panel) { + if (!msm_dp_panel) { DRM_ERROR("invalid input\n"); return -EINVAL; } - if (dp_panel->drm_edid) + if (msm_dp_panel->drm_edid) return drm_edid_connector_add_modes(connector); return 0; } -static u8 dp_panel_get_edid_checksum(const struct edid *edid) +static u8 msm_dp_panel_get_edid_checksum(const struct edid *edid) { edid += edid->extensions; return edid->checksum; } -void dp_panel_handle_sink_request(struct dp_panel *dp_panel) +void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel) { - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; - if (!dp_panel) { + if (!msm_dp_panel) { DRM_ERROR("invalid input\n"); return; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) { /* FIXME: get rid of drm_edid_raw() */ - const 
struct edid *edid = drm_edid_raw(dp_panel->drm_edid); + const struct edid *edid = drm_edid_raw(msm_dp_panel->drm_edid); u8 checksum; if (edid) - checksum = dp_panel_get_edid_checksum(edid); + checksum = msm_dp_panel_get_edid_checksum(edid); else - checksum = dp_panel->connector->real_edid_checksum; + checksum = msm_dp_panel->connector->real_edid_checksum; - dp_link_send_edid_checksum(panel->link, checksum); - dp_link_send_test_response(panel->link); + msm_dp_link_send_edid_checksum(panel->link, checksum); + msm_dp_link_send_test_response(panel->link); } } -void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable) +void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable) { - struct dp_catalog *catalog; - struct dp_panel_private *panel; + struct msm_dp_catalog *catalog; + struct msm_dp_panel_private *panel; - if (!dp_panel) { + if (!msm_dp_panel) { DRM_ERROR("invalid input\n"); return; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); catalog = panel->catalog; if (!panel->panel_on) { @@ -262,31 +262,31 @@ void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable) } if (!enable) { - dp_catalog_panel_tpg_disable(catalog); + msm_dp_catalog_panel_tpg_disable(catalog); return; } drm_dbg_dp(panel->drm_dev, "calling catalog tpg_enable\n"); - dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode); + msm_dp_catalog_panel_tpg_enable(catalog, &panel->msm_dp_panel.msm_dp_mode.drm_mode); } -static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel) +static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel) { - struct dp_catalog *catalog; - struct dp_panel_private *panel; - struct dp_display_mode *dp_mode; + struct msm_dp_catalog *catalog; + struct msm_dp_panel_private *panel; + struct msm_dp_display_mode *msm_dp_mode; struct drm_dp_vsc_sdp vsc_sdp_data; struct dp_sdp vsc_sdp; ssize_t len; - if (!dp_panel) { + if (!msm_dp_panel) { DRM_ERROR("invalid input\n"); return -EINVAL; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); catalog = panel->catalog; - dp_mode = &dp_panel->dp_mode; + msm_dp_mode = &msm_dp_panel->msm_dp_mode; memset(&vsc_sdp_data, 0, sizeof(vsc_sdp_data)); @@ -300,7 +300,7 @@ static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel) vsc_sdp_data.colorimetry = DP_COLORIMETRY_DEFAULT; /* VSC SDP Payload for DB17 */ - vsc_sdp_data.bpc = dp_mode->bpp / 3; + vsc_sdp_data.bpc = msm_dp_mode->bpp / 3; vsc_sdp_data.dynamic_range = DP_DYNAMIC_RANGE_CTA; /* VSC SDP Payload for DB18 */ @@ -312,36 +312,36 @@ static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel) return len; } - dp_catalog_panel_enable_vsc_sdp(catalog, &vsc_sdp); + msm_dp_catalog_panel_enable_vsc_sdp(catalog, &vsc_sdp); return 0; } -void dp_panel_dump_regs(struct dp_panel *dp_panel) +void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel) { - struct dp_catalog *catalog; - struct dp_panel_private *panel; + struct msm_dp_catalog *catalog; + struct msm_dp_panel_private *panel; - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); catalog = panel->catalog; - dp_catalog_dump_regs(catalog); + msm_dp_catalog_dump_regs(catalog); } -int dp_panel_timing_cfg(struct dp_panel *dp_panel) +int msm_dp_panel_timing_cfg(struct msm_dp_panel 
*msm_dp_panel) { u32 data, total_ver, total_hor; - struct dp_catalog *catalog; - struct dp_panel_private *panel; + struct msm_dp_catalog *catalog; + struct msm_dp_panel_private *panel; struct drm_display_mode *drm_mode; u32 width_blanking; u32 sync_start; - u32 dp_active; + u32 msm_dp_active; u32 total; - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); catalog = panel->catalog; - drm_mode = &panel->dp_panel.dp_mode.drm_mode; + drm_mode = &panel->msm_dp_panel.msm_dp_mode.drm_mode; drm_dbg_dp(panel->drm_dev, "width=%d hporch= %d %d %d\n", drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end, @@ -371,9 +371,9 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel) data = drm_mode->vsync_end - drm_mode->vsync_start; data <<= 16; - data |= (panel->dp_panel.dp_mode.v_active_low << 31); + data |= (panel->msm_dp_panel.msm_dp_mode.v_active_low << 31); data |= drm_mode->hsync_end - drm_mode->hsync_start; - data |= (panel->dp_panel.dp_mode.h_active_low << 15); + data |= (panel->msm_dp_panel.msm_dp_mode.h_active_low << 15); width_blanking = data; @@ -381,26 +381,26 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel) data <<= 16; data |= drm_mode->hdisplay; - dp_active = data; + msm_dp_active = data; - dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, dp_active); + msm_dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, msm_dp_active); - if (dp_panel->dp_mode.out_fmt_is_yuv_420) - dp_panel_setup_vsc_sdp_yuv_420(dp_panel); + if (msm_dp_panel->msm_dp_mode.out_fmt_is_yuv_420) + msm_dp_panel_setup_vsc_sdp_yuv_420(msm_dp_panel); panel->panel_on = true; return 0; } -int dp_panel_init_panel_info(struct dp_panel *dp_panel) +int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel) { struct drm_display_mode *drm_mode; - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; - drm_mode = &dp_panel->dp_mode.drm_mode; + drm_mode = &msm_dp_panel->msm_dp_mode.drm_mode; - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); /* * print resolution info as this is a result @@ -421,18 +421,18 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel) drm_mode->vsync_end - drm_mode->vsync_start); drm_dbg_dp(panel->drm_dev, "pixel clock (KHz)=(%d)\n", drm_mode->clock); - drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp); + drm_dbg_dp(panel->drm_dev, "bpp = %d\n", msm_dp_panel->msm_dp_mode.bpp); - dp_panel->dp_mode.bpp = dp_panel_get_mode_bpp(dp_panel, dp_panel->dp_mode.bpp, - dp_panel->dp_mode.drm_mode.clock); + msm_dp_panel->msm_dp_mode.bpp = msm_dp_panel_get_mode_bpp(msm_dp_panel, msm_dp_panel->msm_dp_mode.bpp, + msm_dp_panel->msm_dp_mode.drm_mode.clock); drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n", - dp_panel->dp_mode.bpp); + msm_dp_panel->msm_dp_mode.bpp); return 0; } -static u32 dp_panel_link_frequencies(struct device_node *of_node) +static u32 msm_dp_panel_link_frequencies(struct device_node *of_node) { struct device_node *endpoint; u64 frequency = 0; @@ -456,17 +456,17 @@ static u32 dp_panel_link_frequencies(struct device_node *of_node) return frequency; } -static int dp_panel_parse_dt(struct dp_panel *dp_panel) +static int msm_dp_panel_parse_dt(struct msm_dp_panel *msm_dp_panel) { - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; struct device_node *of_node; int cnt; - panel = container_of(dp_panel, struct 
dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); of_node = panel->dev->of_node; /* - * data-lanes is the property of dp_out endpoint + * data-lanes is the property of msm_dp_out endpoint */ cnt = drm_of_get_data_lanes_count_ep(of_node, 1, 0, 1, DP_MAX_NUM_DP_LANES); if (cnt < 0) { @@ -475,21 +475,21 @@ static int dp_panel_parse_dt(struct dp_panel *dp_panel) } if (cnt > 0) - dp_panel->max_dp_lanes = cnt; + msm_dp_panel->max_dp_lanes = cnt; else - dp_panel->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */ + msm_dp_panel->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */ - dp_panel->max_dp_link_rate = dp_panel_link_frequencies(of_node); - if (!dp_panel->max_dp_link_rate) - dp_panel->max_dp_link_rate = DP_LINK_RATE_HBR2; + msm_dp_panel->max_dp_link_rate = msm_dp_panel_link_frequencies(of_node); + if (!msm_dp_panel->max_dp_link_rate) + msm_dp_panel->max_dp_link_rate = DP_LINK_RATE_HBR2; return 0; } -struct dp_panel *dp_panel_get(struct dp_panel_in *in) +struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in) { - struct dp_panel_private *panel; - struct dp_panel *dp_panel; + struct msm_dp_panel_private *panel; + struct msm_dp_panel *msm_dp_panel; int ret; if (!in->dev || !in->catalog || !in->aux || !in->link) { @@ -506,20 +506,20 @@ struct dp_panel *dp_panel_get(struct dp_panel_in *in) panel->catalog = in->catalog; panel->link = in->link; - dp_panel = &panel->dp_panel; - dp_panel->max_bw_code = DP_LINK_BW_8_1; + msm_dp_panel = &panel->msm_dp_panel; + msm_dp_panel->max_bw_code = DP_LINK_BW_8_1; - ret = dp_panel_parse_dt(dp_panel); + ret = msm_dp_panel_parse_dt(msm_dp_panel); if (ret) return ERR_PTR(ret); - return dp_panel; + return msm_dp_panel; } -void dp_panel_put(struct dp_panel *dp_panel) +void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel) { - if (!dp_panel) + if (!msm_dp_panel) return; - drm_edid_free(dp_panel->drm_edid); + drm_edid_free(msm_dp_panel->drm_edid); } diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h index 6722e3923fa5..0e944db3adf2 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.h +++ b/drivers/gpu/drm/msm/dp/dp_panel.h @@ -13,7 +13,7 @@ struct edid; -struct dp_display_mode { +struct msm_dp_display_mode { struct drm_display_mode drm_mode; u32 bpp; u32 h_active_low; @@ -21,28 +21,28 @@ struct dp_display_mode { bool out_fmt_is_yuv_420; }; -struct dp_panel_in { +struct msm_dp_panel_in { struct device *dev; struct drm_dp_aux *aux; - struct dp_link *link; - struct dp_catalog *catalog; + struct msm_dp_link *link; + struct msm_dp_catalog *catalog; }; -struct dp_panel_psr { +struct msm_dp_panel_psr { u8 version; u8 capabilities; }; -struct dp_panel { +struct msm_dp_panel { /* dpcd raw data */ u8 dpcd[DP_RECEIVER_CAP_SIZE]; u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; - struct dp_link_info link_info; + struct msm_dp_link_info link_info; const struct drm_edid *drm_edid; struct drm_connector *connector; - struct dp_display_mode dp_mode; - struct dp_panel_psr psr_cap; + struct msm_dp_display_mode msm_dp_mode; + struct msm_dp_panel_psr psr_cap; bool video_test; bool vsc_sdp_supported; @@ -52,18 +52,18 @@ struct dp_panel { u32 max_bw_code; }; -int dp_panel_init_panel_info(struct dp_panel *dp_panel); -int dp_panel_deinit(struct dp_panel *dp_panel); -int dp_panel_timing_cfg(struct dp_panel *dp_panel); -void dp_panel_dump_regs(struct dp_panel *dp_panel); -int dp_panel_read_sink_caps(struct dp_panel *dp_panel, +int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel); +int 
msm_dp_panel_deinit(struct msm_dp_panel *msm_dp_panel); +int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel); +void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel); +int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel, struct drm_connector *connector); -u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp, +u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_max_bpp, u32 mode_pclk_khz); -int dp_panel_get_modes(struct dp_panel *dp_panel, +int msm_dp_panel_get_modes(struct msm_dp_panel *msm_dp_panel, struct drm_connector *connector); -void dp_panel_handle_sink_request(struct dp_panel *dp_panel); -void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable); +void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel); +void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable); /** * is_link_rate_valid() - validates the link rate @@ -80,7 +80,7 @@ static inline bool is_link_rate_valid(u32 bw_code) } /** - * dp_link_is_lane_count_valid() - validates the lane count + * msm_dp_link_is_lane_count_valid() - validates the lane count * @lane_count: lane count requested by the sink * * Returns true if the requested lane count is supported. @@ -92,6 +92,6 @@ static inline bool is_lane_count_valid(u32 lane_count) lane_count == 4); } -struct dp_panel *dp_panel_get(struct dp_panel_in *in); -void dp_panel_put(struct dp_panel *dp_panel); +struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in); +void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel); #endif /* _DP_PANEL_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_utils.c b/drivers/gpu/drm/msm/dp/dp_utils.c index da9207caf72d..2a40f07fe2d5 100644 --- a/drivers/gpu/drm/msm/dp/dp_utils.c +++ b/drivers/gpu/drm/msm/dp/dp_utils.c @@ -9,7 +9,7 @@ #define DP_SDP_HEADER_SIZE 8 -u8 dp_utils_get_g0_value(u8 data) +u8 msm_dp_utils_get_g0_value(u8 data) { u8 c[4]; u8 g[4]; @@ -30,7 +30,7 @@ u8 dp_utils_get_g0_value(u8 data) return ret_data; } -u8 dp_utils_get_g1_value(u8 data) +u8 msm_dp_utils_get_g1_value(u8 data) { u8 c[4]; u8 g[4]; @@ -51,7 +51,7 @@ u8 dp_utils_get_g1_value(u8 data) return ret_data; } -u8 dp_utils_calculate_parity(u32 data) +u8 msm_dp_utils_calculate_parity(u32 data) { u8 x0 = 0; u8 x1 = 0; @@ -65,8 +65,8 @@ u8 dp_utils_calculate_parity(u32 data) iData = (data >> i * 4) & 0xF; ci = iData ^ x1; - x1 = x0 ^ dp_utils_get_g1_value(ci); - x0 = dp_utils_get_g0_value(ci); + x1 = x0 ^ msm_dp_utils_get_g1_value(ci); + x0 = msm_dp_utils_get_g0_value(ci); } parity_byte = x1 | (x0 << 4); @@ -74,7 +74,7 @@ u8 dp_utils_calculate_parity(u32 data) return parity_byte; } -ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff) +ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff) { size_t length; @@ -83,14 +83,14 @@ ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_b return -ENOSPC; header_buff[0] = FIELD_PREP(HEADER_0_MASK, sdp_header->HB0) | - FIELD_PREP(PARITY_0_MASK, dp_utils_calculate_parity(sdp_header->HB0)) | + FIELD_PREP(PARITY_0_MASK, msm_dp_utils_calculate_parity(sdp_header->HB0)) | FIELD_PREP(HEADER_1_MASK, sdp_header->HB1) | - FIELD_PREP(PARITY_1_MASK, dp_utils_calculate_parity(sdp_header->HB1)); + FIELD_PREP(PARITY_1_MASK, msm_dp_utils_calculate_parity(sdp_header->HB1)); header_buff[1] = FIELD_PREP(HEADER_2_MASK, sdp_header->HB2) | - FIELD_PREP(PARITY_2_MASK, dp_utils_calculate_parity(sdp_header->HB2)) | + FIELD_PREP(PARITY_2_MASK, 
msm_dp_utils_calculate_parity(sdp_header->HB2)) | FIELD_PREP(HEADER_3_MASK, sdp_header->HB3) | - FIELD_PREP(PARITY_3_MASK, dp_utils_calculate_parity(sdp_header->HB3)); + FIELD_PREP(PARITY_3_MASK, msm_dp_utils_calculate_parity(sdp_header->HB3)); return length; } diff --git a/drivers/gpu/drm/msm/dp/dp_utils.h b/drivers/gpu/drm/msm/dp/dp_utils.h index 7c056d9798dc..88d53157f5b5 100644 --- a/drivers/gpu/drm/msm/dp/dp_utils.h +++ b/drivers/gpu/drm/msm/dp/dp_utils.h @@ -28,9 +28,9 @@ #define HEADER_3_MASK GENMASK(23, 16) #define PARITY_3_MASK GENMASK(31, 24) -u8 dp_utils_get_g0_value(u8 data); -u8 dp_utils_get_g1_value(u8 data); -u8 dp_utils_calculate_parity(u32 data); -ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff); +u8 msm_dp_utils_get_g0_value(u8 data); +u8 msm_dp_utils_get_g1_value(u8 data); +u8 msm_dp_utils_calculate_parity(u32 data); +ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff); #endif /* _DP_UTILS_H_ */ diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index efd7c23b662f..296215877613 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -207,7 +207,7 @@ static const struct dev_pm_ops dsi_pm_ops = { static struct platform_driver dsi_driver = { .probe = dsi_dev_probe, - .remove_new = dsi_dev_remove, + .remove = dsi_dev_remove, .driver = { .name = "msm_dsi", .of_match_table = dt_match, diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 185d7de0bf37..a98d24b7cb00 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -542,7 +542,7 @@ static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mo int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay; - return new_htotal * mode->vtotal * drm_mode_vrefresh(mode); + return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal); } static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode, @@ -550,7 +550,7 @@ static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode, { unsigned long pclk_rate; - pclk_rate = mode->clock * 1000; + pclk_rate = mode->clock * 1000u; if (dsc) pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 0bfee41c2e71..37b3809c6bdd 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -561,7 +561,7 @@ static const struct of_device_id msm_hdmi_dt_match[] = { static struct platform_driver msm_hdmi_driver = { .probe = msm_hdmi_dev_probe, - .remove_new = msm_hdmi_dev_remove, + .remove = msm_hdmi_dev_remove, .driver = { .name = "hdmi_msm", .of_match_table = msm_hdmi_dt_match, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c index 95b3f7535d84..03120c54ced6 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c @@ -203,7 +203,7 @@ static const struct of_device_id msm_hdmi_phy_dt_match[] = { static struct platform_driver msm_hdmi_phy_platform_driver = { .probe = msm_hdmi_phy_probe, - .remove_new = msm_hdmi_phy_remove, + .remove = msm_hdmi_phy_remove, .driver = { .name = "msm_hdmi_phy", .of_match_table = msm_hdmi_phy_dt_match, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c index 0e3a2b16a2ce..a719fd33d9d8 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c @@ -153,22 +153,12 @@ 
static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk) return dividend - 1; } -static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk) -{ - u64 fdata = ((u64)pll_cmp) * ref_clk * 10; - - do_div(fdata, HDMI_PLL_CMP_CNT); - - return fdata; -} - #define HDMI_REF_CLOCK_HZ ((u64)19200000) #define HDMI_MHZ_TO_HZ ((u64)1000000) static int pll_get_post_div(struct hdmi_8998_post_divider *pd, u64 bclk) { - u32 const ratio_list[] = {1, 2, 3, 4, 5, 6, - 9, 10, 12, 15, 25}; - u32 const band_list[] = {0, 1, 2, 3}; + static const u32 ratio_list[] = {1, 2, 3, 4, 5, 6, 9, 10, 12, 15, 25}; + static const u32 band_list[] = {0, 1, 2, 3}; u32 const sz_ratio = ARRAY_SIZE(ratio_list); u32 const sz_band = ARRAY_SIZE(band_list); u32 const cmp_cnt = 1024; @@ -279,7 +269,7 @@ find_optimal_index: case 25: found_hsclk_divsel = 14; break; - }; + } pd->vco_freq = found_vco_freq; pd->tx_band_sel = found_tx_band_sel; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index edbc1ab0fbc8..ffbcc97b5018 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -985,6 +985,10 @@ module_param(prefer_mdp5, bool, 0444); /* list all platforms supported by both mdp5 and dpu drivers */ static const char *const msm_mdp5_dpu_migration[] = { + "qcom,msm8917-mdp5", + "qcom,msm8937-mdp5", + "qcom,msm8953-mdp5", + "qcom,msm8996-mdp5", "qcom,sdm630-mdp5", "qcom,sdm660-mdp5", NULL, @@ -1107,7 +1111,7 @@ static void msm_pdev_remove(struct platform_device *pdev) static struct platform_driver msm_platform_driver = { .probe = msm_pdev_probe, - .remove_new = msm_pdev_remove, + .remove = msm_pdev_remove, .driver = { .name = "msm", }, diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 133e47bea7d5..d8c9a1b19263 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -52,7 +52,6 @@ struct msm_gem_vma; struct msm_disp_state; #define MAX_CRTCS 8 -#define MAX_BRIDGES 8 #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) @@ -71,23 +70,6 @@ enum msm_dsi_controller { }; #define MSM_GPU_MAX_RINGS 4 -#define MAX_H_TILES_PER_DISPLAY 2 - -/** - * struct msm_display_topology - defines a display topology pipeline - * @num_lm: number of layer mixers used - * @num_intf: number of interfaces the panel is mounted on - * @num_dspp: number of dspp blocks used - * @num_dsc: number of Display Stream Compression (DSC) blocks used - * @needs_cdm: indicates whether cdm block is needed for this display topology - */ -struct msm_display_topology { - u32 num_lm; - u32 num_intf; - u32 num_dspp; - u32 num_dsc; - bool needs_cdm; -}; /* Commit/Event thread specific structure */ struct msm_drm_thread { diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index a274b8466423..0d4a3744cfcb 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -783,7 +783,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) mutex_unlock(&gpu->active_lock); gpu->funcs->submit(gpu, submit); - gpu->cur_ctx_seqno = submit->queue->ctx->seqno; + submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno; pm_runtime_put(&gpu->pdev->dev); hangcheck_timer_reset(gpu); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 1f02bb9956be..7cabc8480d7c 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -194,17 +194,6 @@ struct msm_gpu { refcount_t sysprof_active; /** - * cur_ctx_seqno: - * - * The ctx->seqno value of the last context to submit rendering, 
- * and the one with current pgtables installed (for generations - * that support per-context pgtables). Tracked by seqno rather - * than pointer value to avoid dangling pointers, and cases where - * a ctx can be freed and a new one created with the same address. - */ - int cur_ctx_seqno; - - /** * lock: * * General lock for serializing all the gpu things. diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index ea70c1c32d94..6970b0f7f457 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -140,6 +140,7 @@ void msm_devfreq_init(struct msm_gpu *gpu) { struct msm_gpu_devfreq *df = &gpu->devfreq; struct msm_drm_private *priv = gpu->dev->dev_private; + int ret; /* We need target support to do devfreq */ if (!gpu->funcs->gpu_busy) @@ -156,8 +157,12 @@ void msm_devfreq_init(struct msm_gpu *gpu) mutex_init(&df->lock); - dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq, - DEV_PM_QOS_MIN_FREQUENCY, 0); + ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq, + DEV_PM_QOS_MIN_FREQUENCY, 0); + if (ret < 0) { + DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize QoS\n"); + return; + } msm_devfreq_profile.initial_freq = gpu->fast_rate; diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h index ac40d857bc45..7f863282db0d 100644 --- a/drivers/gpu/drm/msm/msm_gpu_trace.h +++ b/drivers/gpu/drm/msm/msm_gpu_trace.h @@ -177,6 +177,34 @@ TRACE_EVENT(msm_gpu_resume, TP_printk("%u", __entry->dummy) ); +TRACE_EVENT(msm_gpu_preemption_trigger, + TP_PROTO(int ring_id_from, int ring_id_to), + TP_ARGS(ring_id_from, ring_id_to), + TP_STRUCT__entry( + __field(int, ring_id_from) + __field(int, ring_id_to) + ), + TP_fast_assign( + __entry->ring_id_from = ring_id_from; + __entry->ring_id_to = ring_id_to; + ), + TP_printk("preempting %u -> %u", + __entry->ring_id_from, + __entry->ring_id_to) +); + +TRACE_EVENT(msm_gpu_preemption_irq, + TP_PROTO(u32 ring_id), + TP_ARGS(ring_id), + TP_STRUCT__entry( + __field(u32, ring_id) + ), + TP_fast_assign( + __entry->ring_id = ring_id; + ), + TP_printk("preempted to %u", __entry->ring_id) +); + #endif #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c index af6a6fcb1173..f3326d09bdbc 100644 --- a/drivers/gpu/drm/msm/msm_kms.c +++ b/drivers/gpu/drm/msm/msm_kms.c @@ -5,11 +5,11 @@ * Author: Rob Clark <robdclark@gmail.com> */ +#include <linux/aperture.h> #include <linux/kthread.h> #include <linux/sched/mm.h> #include <uapi/linux/sched/types.h> -#include <drm/drm_aperture.h> #include <drm/drm_drv.h> #include <drm/drm_mode_config.h> #include <drm/drm_vblank.h> @@ -237,7 +237,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv) int ret; /* the fw fb could be anywhere in memory */ - ret = drm_aperture_remove_framebuffers(drv); + ret = aperture_remove_all_conflicting_devices(drv->name); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 1e0c54de3716..e60162744c66 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -92,12 +92,6 @@ struct msm_kms_funcs { * Format handling: */ - /* do format checking on format modified through fb_cmd2 modifiers */ - int (*check_modified_format)(const struct msm_kms *kms, - const struct msm_format *msm_fmt, - const struct drm_mode_fb_cmd2 *cmd, - struct drm_gem_object **bos); - /* misc: */ long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder); 
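The msm_gpu.c and msm_gpu.h hunks above move cur_ctx_seqno from struct msm_gpu onto each ring, so the "current context" is tracked per ring rather than globally, as ring-level preemption requires. A minimal sketch of the check this enables, using hypothetical stand-in types rather than the driver's real structures:

/* Illustrative sketch, not part of the patch. Stand-ins for struct
 * msm_file_private, struct msm_gpu_submitqueue, struct msm_ringbuffer
 * and struct msm_gem_submit (names are made up for this example). */
#include <stdbool.h>

struct ex_ctx    { int seqno; };
struct ex_queue  { struct ex_ctx *ctx; };
struct ex_ring   { int cur_ctx_seqno; };
struct ex_submit { struct ex_queue *queue; struct ex_ring *ring; };

/* A context switch (e.g. installing the new context's pagetables) is
 * only needed when the submitting context is not the last one to run
 * on this ring. Seqnos, not pointers, are compared so a freed context
 * reallocated at the same address is not mistaken for the current one. */
static bool ex_needs_ctx_switch(const struct ex_submit *submit)
{
	return submit->queue->ctx->seqno != submit->ring->cur_ctx_seqno;
}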
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c index faa88fd6eb4d..76b6ae35a3cb 100644 --- a/drivers/gpu/drm/msm/msm_mdss.c +++ b/drivers/gpu/drm/msm/msm_mdss.c @@ -19,13 +19,7 @@ #include "msm_mdss.h" #include "msm_kms.h" -#define HW_REV 0x0 -#define HW_INTR_STATUS 0x0010 - -#define UBWC_DEC_HW_VERSION 0x58 -#define UBWC_STATIC 0x144 -#define UBWC_CTRL_2 0x150 -#define UBWC_PREDICTION_MODE 0x154 +#include <generated/mdss.xml.h> #define MIN_IB_BW 400000000UL /* Min ib vote 400MB */ @@ -83,7 +77,7 @@ static void msm_mdss_irq(struct irq_desc *desc) chained_irq_enter(chip, desc); - interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS); + interrupts = readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_INTR_STATUS); while (interrupts) { irq_hw_number_t hwirq = fls(interrupts) - 1; @@ -173,7 +167,7 @@ static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss) { const struct msm_mdss_data *data = msm_mdss->mdss_data; - writel_relaxed(data->ubwc_static, msm_mdss->mmio + UBWC_STATIC); + writel_relaxed(data->ubwc_static, msm_mdss->mmio + REG_MDSS_UBWC_STATIC); } static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss) @@ -189,7 +183,7 @@ static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss) if (data->ubwc_enc_version == UBWC_1_0) value |= BIT(8); - writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC); + writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC); } static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss) @@ -200,21 +194,22 @@ static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss) (data->highest_bank_bit & 0x7) << 4 | (data->macrotile_mode & 0x1) << 12; - writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC); + writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC); if (data->ubwc_enc_version == UBWC_3_0) { - writel_relaxed(1, msm_mdss->mmio + UBWC_CTRL_2); - writel_relaxed(0, msm_mdss->mmio + UBWC_PREDICTION_MODE); + writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2); + writel_relaxed(0, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE); } else { if (data->ubwc_dec_version == UBWC_4_3) - writel_relaxed(3, msm_mdss->mmio + UBWC_CTRL_2); + writel_relaxed(3, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2); else - writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2); - writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE); + writel_relaxed(2, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2); + writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE); } } -#define MDSS_HW_MAJ_MIN GENMASK(31, 16) +#define MDSS_HW_MAJ_MIN \ + (MDSS_HW_VERSION_MAJOR__MASK | MDSS_HW_VERSION_MINOR__MASK) #define MDSS_HW_MSM8996 0x1007 #define MDSS_HW_MSM8937 0x100e @@ -235,7 +230,7 @@ static const struct msm_mdss_data *msm_mdss_generate_mdp5_mdss_data(struct msm_m if (!data) return NULL; - hw_rev = readl_relaxed(mdss->mmio + HW_REV); + hw_rev = readl_relaxed(mdss->mmio + REG_MDSS_HW_VERSION); hw_rev = FIELD_GET(MDSS_HW_MAJ_MIN, hw_rev); if (hw_rev == MDSS_HW_MSM8996 || @@ -334,9 +329,9 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss) dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n", msm_mdss->mdss_data->ubwc_dec_version); dev_err(msm_mdss->dev, "HW_REV: 0x%x\n", - readl_relaxed(msm_mdss->mmio + HW_REV)); + readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_VERSION)); dev_err(msm_mdss->dev, "UBWC_DEC_HW_VERSION: 0x%x\n", - readl_relaxed(msm_mdss->mmio + UBWC_DEC_HW_VERSION)); + readl_relaxed(msm_mdss->mmio + REG_MDSS_UBWC_DEC_HW_VERSION)); break; } @@ -573,6 +568,16 @@ static const struct msm_mdss_data qcm2290_data = { 
.reg_bus_bw = 76800, }; +static const struct msm_mdss_data sa8775p_data = { + .ubwc_enc_version = UBWC_4_0, + .ubwc_dec_version = UBWC_4_0, + .ubwc_swizzle = 4, + .ubwc_static = 1, + .highest_bank_bit = 0, + .macrotile_mode = 1, + .reg_bus_bw = 74000, +}; + static const struct msm_mdss_data sc7180_data = { .ubwc_enc_version = UBWC_2_0, .ubwc_dec_version = UBWC_2_0, @@ -710,6 +715,7 @@ static const struct of_device_id mdss_dt_match[] = { { .compatible = "qcom,mdss" }, { .compatible = "qcom,msm8998-mdss", .data = &msm8998_data }, { .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data }, + { .compatible = "qcom,sa8775p-mdss", .data = &sa8775p_data }, { .compatible = "qcom,sdm670-mdss", .data = &sdm670_data }, { .compatible = "qcom,sdm845-mdss", .data = &sdm845_data }, { .compatible = "qcom,sc7180-mdss", .data = &sc7180_data }, @@ -734,7 +740,7 @@ MODULE_DEVICE_TABLE(of, mdss_dt_match); static struct platform_driver mdss_platform_driver = { .probe = mdss_probe, - .remove_new = mdss_remove, + .remove = mdss_remove, .driver = { .name = "msm-mdss", .of_match_table = mdss_dt_match, diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 9d6655f96f0c..c803556a8f64 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -64,7 +64,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, char name[32]; int ret; - /* We assume everwhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */ + /* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */ BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ)); ring = kzalloc(sizeof(*ring), GFP_KERNEL); diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h index 0d6beb8cd39a..d1e49f701c81 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.h +++ b/drivers/gpu/drm/msm/msm_ringbuffer.h @@ -31,10 +31,12 @@ struct msm_rbmemptrs { volatile uint32_t rptr; volatile uint32_t fence; /* Introduced on A7xx */ + volatile uint32_t bv_rptr; volatile uint32_t bv_fence; volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT]; volatile u64 ttbr0; + volatile u32 context_idr; }; struct msm_cp_state { @@ -99,6 +101,22 @@ struct msm_ringbuffer { * preemption. Can be acquired from irq context. */ spinlock_t preempt_lock; + + /* + * Whether we skipped writing wptr and it needs to be updated in the + * future when the ring becomes current. + */ + bool restore_wptr; + + /** + * cur_ctx_seqno: + * + * The ctx->seqno value of the last context to submit to this ring. + * Tracked by seqno rather than pointer value to avoid dangling + * pointers, and cases where a ctx can be freed and a new one created + * with the same address.
+ */ + int cur_ctx_seqno; }; struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index 0e803125a325..2fc3eaf81f44 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -161,6 +161,8 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, struct msm_drm_private *priv = drm->dev_private; struct msm_gpu_submitqueue *queue; enum drm_sched_priority sched_prio; + extern int enable_preemption; + bool preemption_supported; unsigned ring_nr; int ret; @@ -170,6 +172,11 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, if (!priv->gpu) return -ENODEV; + preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0; + + if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported) + return -EINVAL; + ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml index 97608603ea62..2db425abf0f3 100644 --- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml +++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml @@ -2358,7 +2358,12 @@ to upconvert to 32b float internally? <reg32 offset="0x0" name="REG" type="a6x_cp_protect"/> </array> - <reg32 offset="0x08A0" name="CP_CONTEXT_SWITCH_CNTL"/> + <reg32 offset="0x08A0" name="CP_CONTEXT_SWITCH_CNTL"> + <bitfield name="STOP" pos="0" type="boolean"/> + <bitfield name="LEVEL" low="6" high="7"/> + <bitfield name="USES_GMEM" pos="8" type="boolean"/> + <bitfield name="SKIP_SAVE_RESTORE" pos="9" type="boolean"/> + </reg32> <reg64 offset="0x08A1" name="CP_CONTEXT_SWITCH_SMMU_INFO"/> <reg64 offset="0x08A3" name="CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR"/> <reg64 offset="0x08A5" name="CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR"/> diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml index cab01af55d22..55a35182858c 100644 --- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml +++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml @@ -581,8 +581,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> and forcibly switch to the indicated context. </doc> <value name="CP_CONTEXT_SWITCH" value="0x54" variants="A6XX"/> - <!-- Note, kgsl calls this CP_SET_AMBLE: --> - <value name="CP_SET_CTXSWITCH_IB" value="0x55" variants="A6XX-"/> + <value name="CP_SET_AMBLE" value="0x55" variants="A6XX-"/> <!-- Seems to always have the payload: @@ -2013,42 +2012,38 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords) </reg32> </domain> -<domain name="CP_SET_CTXSWITCH_IB" width="32"> +<domain name="CP_SET_AMBLE" width="32"> <doc> - Used by the userspace driver to set various IB's which are - executed during context save/restore for handling - state that isn't restored by the - context switch routine itself. - </doc> - <enum name="ctxswitch_ib"> - <value name="RESTORE_IB" value="0"> + Used by the userspace and kernel drivers to set various IB's + which are executed during context save/restore for handling + state that isn't restored by the context switch routine itself. 
+ </doc> + <enum name="amble_type"> + <value name="PREAMBLE_AMBLE_TYPE" value="0"> <doc>Executed unconditionally when switching back to the context.</doc> </value> - <value name="YIELD_RESTORE_IB" value="1"> + <value name="BIN_PREAMBLE_AMBLE_TYPE" value="1"> <doc> Executed when switching back after switching away during execution of - a CP_SET_MARKER packet with RM6_YIELD as the - payload *and* the normal save routine was - bypassed for a shorter one. I think this is - connected to the "skipsaverestore" bit set by - the kernel when preempting. + a CP_SET_MARKER packet with RM6_BIN_RENDER_END as the + payload *and* skipsaverestore is set. This is + expected to restore static register values not + saved when skipsaverestore is set. </doc> </value> - <value name="SAVE_IB" value="2"> + <value name="POSTAMBLE_AMBLE_TYPE" value="2"> <doc> Executed when switching away from the context, except for context switches initiated via CP_YIELD. </doc> </value> - <value name="RB_SAVE_IB" value="3"> + <value name="KMD_AMBLE_TYPE" value="3"> <doc> This can only be set by the RB (i.e. the kernel) and executes with protected mode off, but - is otherwise similar to SAVE_IB. - - Note, kgsl calls this CP_KMD_AMBLE_TYPE + is otherwise similar to POSTAMBLE_AMBLE_TYPE. </doc> </value> </enum> @@ -2060,7 +2055,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords) </reg32> <reg32 offset="2" name="2"> <bitfield name="DWORDS" low="0" high="19" type="uint"/> - <bitfield name="TYPE" low="20" high="21" type="ctxswitch_ib"/> + <bitfield name="TYPE" low="20" high="21" type="amble_type"/> </reg32> </domain> diff --git a/drivers/gpu/drm/msm/registers/display/mdp5.xml b/drivers/gpu/drm/msm/registers/display/mdp5.xml index 92f3263af170..8c9c4af350aa 100644 --- a/drivers/gpu/drm/msm/registers/display/mdp5.xml +++ b/drivers/gpu/drm/msm/registers/display/mdp5.xml @@ -9,22 +9,6 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <domain name="VBIF" width="32"> </domain> -<domain name="MDSS" width="32"> - <reg32 offset="0x00000" name="HW_VERSION"> - <bitfield name="STEP" low="0" high="15" type="uint"/> - <bitfield name="MINOR" low="16" high="27" type="uint"/> - <bitfield name="MAJOR" low="28" high="31" type="uint"/> - </reg32> - - <reg32 offset="0x00010" name="HW_INTR_STATUS"> - <bitfield name="INTR_MDP" pos="0" type="boolean"/> - <bitfield name="INTR_DSI0" pos="4" type="boolean"/> - <bitfield name="INTR_DSI1" pos="5" type="boolean"/> - <bitfield name="INTR_HDMI" pos="8" type="boolean"/> - <bitfield name="INTR_EDP" pos="12" type="boolean"/> - </reg32> -</domain> - <domain name="MDP5" width="32"> <enum name="mdp5_intf_type"> diff --git a/drivers/gpu/drm/msm/registers/display/mdss.xml b/drivers/gpu/drm/msm/registers/display/mdss.xml new file mode 100644 index 000000000000..ac85caf1575c --- /dev/null +++ b/drivers/gpu/drm/msm/registers/display/mdss.xml @@ -0,0 +1,29 @@ +<?xml version="1.0" encoding="UTF-8"?> +<database xmlns="http://nouveau.freedesktop.org/" +xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" +xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> +<import file="freedreno_copyright.xml"/> + +<domain name="MDSS" width="32"> + <reg32 offset="0x00000" name="HW_VERSION"> + <bitfield name="STEP" low="0" high="15" type="uint"/> + <bitfield name="MINOR" low="16" high="27" type="uint"/> + <bitfield name="MAJOR" low="28" high="31" type="uint"/> + </reg32> + + <reg32 offset="0x00010" name="HW_INTR_STATUS"> + <bitfield name="INTR_MDP" pos="0" type="boolean"/> + <bitfield name="INTR_DSI0" 
pos="4" type="boolean"/> + <bitfield name="INTR_DSI1" pos="5" type="boolean"/> + <bitfield name="INTR_HDMI" pos="8" type="boolean"/> + <bitfield name="INTR_EDP" pos="12" type="boolean"/> + </reg32> + + <reg32 offset="0x00058" name="UBWC_DEC_HW_VERSION"/> + + <reg32 offset="0x00144" name="UBWC_STATIC"/> + <reg32 offset="0x00150" name="UBWC_CTRL_2"/> + <reg32 offset="0x00154" name="UBWC_PREDICTION_MODE"/> +</domain> + +</database> diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c index 58ccad9c425d..51ae0b51b1e8 100644 --- a/drivers/gpu/drm/mxsfb/lcdif_drv.c +++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c @@ -368,7 +368,7 @@ static const struct dev_pm_ops lcdif_pm_ops = { static struct platform_driver lcdif_platform_driver = { .probe = lcdif_probe, - .remove_new = lcdif_remove, + .remove = lcdif_remove, .shutdown = lcdif_shutdown, .driver = { .name = "imx-lcdif", diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 34a98717b72c..6b95e4eb3e4e 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -417,7 +417,7 @@ static const struct dev_pm_ops mxsfb_pm_ops = { static struct platform_driver mxsfb_platform_driver = { .probe = mxsfb_probe, - .remove_new = mxsfb_remove, + .remove = mxsfb_remove, .shutdown = mxsfb_shutdown, .driver = { .name = "mxsfb", diff --git a/drivers/gpu/drm/nouveau/dispnv50/tile.h b/drivers/gpu/drm/nouveau/dispnv50/tile.h new file mode 100644 index 000000000000..e2be82830cf7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/tile.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NV50_TILE_H__ +#define __NV50_TILE_H__ + +#include <linux/types.h> +#include <linux/math.h> + +/* + * Tiling parameters for NV50+. + * GOB = Group of bytes, the main unit for tiling blocks. + * Tiling blocks are a power of 2 number of GOB. + * All GOBs and blocks have the same width: 64 bytes (so 16 pixels in 32bits). + * tile_mode is the log2 of the number of GOB per block. 
+ */ + +#define NV_TILE_GOB_HEIGHT_TESLA 4 /* 4 x 64 bytes = 256 bytes for a GOB on Tesla */ +#define NV_TILE_GOB_HEIGHT 8 /* 8 x 64 bytes = 512 bytes for a GOB on Fermi and later */ +#define NV_TILE_GOB_WIDTH_BYTES 64 + +/* Number of blocks to cover the width of the framebuffer */ +static inline u32 nouveau_get_width_in_blocks(u32 stride) +{ + return DIV_ROUND_UP(stride, NV_TILE_GOB_WIDTH_BYTES); +} + +/* Return the height in pixels of one GOB */ +static inline u32 nouveau_get_gob_height(u16 family) +{ + if (family == NV_DEVICE_INFO_V0_TESLA) + return NV_TILE_GOB_HEIGHT_TESLA; + else + return NV_TILE_GOB_HEIGHT; +} + +/* Number of blocks to cover the height of the framebuffer */ +static inline u32 nouveau_get_height_in_blocks(u32 height, u32 gobs_in_block, u16 family) +{ + return DIV_ROUND_UP(height, nouveau_get_gob_height(family) * gobs_in_block); +} + +/* Return the GOB size in bytes */ +static inline u32 nouveau_get_gob_size(u16 family) +{ + return nouveau_get_gob_height(family) * NV_TILE_GOB_WIDTH_BYTES; +} + +/* Return the number of GOBs in a block */ +static inline int nouveau_get_gobs_in_block(u32 tile_mode, u16 chipset) +{ + if (chipset >= 0xc0) + return 1 << (tile_mode >> 4); + return 1 << tile_mode; +} + +/* Return true if tile_mode is invalid */ +static inline bool nouveau_check_tile_mode(u32 tile_mode, u16 chipset) +{ + if (chipset >= 0xc0) + return (tile_mode & 0xfffff0f); + return (tile_mode & 0xfffffff0); +} + +#endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 7a2cceaee6e9..f6be426dd525 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -30,14 +30,20 @@ #include <nvhw/class/cl507e.h> #include <nvhw/class/clc37e.h> +#include <linux/iosys-map.h> + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> -#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_panic.h> +#include <drm/ttm/ttm_bo.h> #include "nouveau_bo.h" #include "nouveau_gem.h" +#include "tile.h" static void nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma) @@ -577,6 +583,114 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) return 0; } +/* Only used by drm_panic get_scanout_buffer() and set_pixel(), so it is + * protected by the drm panic spinlock + */ +static u32 nv50_panic_blk_h; + +/* Return the framebuffer offset of the start of the block where pixel(x,y) is */ +static u32 +nv50_get_block_off(unsigned int x, unsigned int y, unsigned int pitch) +{ + u32 blk_x, blk_y, blk_columns; + + blk_columns = nouveau_get_width_in_blocks(pitch); + blk_x = (x * 4) / NV_TILE_GOB_WIDTH_BYTES; + blk_y = y / nv50_panic_blk_h; + + return ((blk_y * blk_columns) + blk_x) * NV_TILE_GOB_WIDTH_BYTES * nv50_panic_blk_h; +} + +/* Turing and later have 2 levels of tiles inside the block */ +static void +nv50_set_pixel_swizzle(struct drm_scanout_buffer *sb, unsigned int x, + unsigned int y, u32 color) +{ + u32 blk_off, off, swizzle; + + blk_off = nv50_get_block_off(x, y, sb->pitch[0]); + + y = y % nv50_panic_blk_h; + + /* Inside the block, use the fast address swizzle to compute the offset + * For nvidia blocklinear, bit order is yn..y3 x3 y2 x2 y1 y0 x1 x0 + */ + swizzle = (x & 3) | (y & 3) << 2 | (x & 4) << 2 | (y & 4) << 3; + swizzle |= (x & 8) << 3 | (y >> 3) << 7; + off = blk_off + swizzle * 4; + + iosys_map_wr(&sb->map[0], off, u32, color); +} + +static
void +nv50_set_pixel(struct drm_scanout_buffer *sb, unsigned int x, unsigned int y, + u32 color) +{ + u32 blk_off, off; + + blk_off = nv50_get_block_off(x, y, sb->width); + + x = x % (NV_TILE_GOB_WIDTH_BYTES / 4); + y = y % nv50_panic_blk_h; + off = blk_off + x * 4 + y * NV_TILE_GOB_WIDTH_BYTES; + + iosys_map_wr(&sb->map[0], off, u32, color); +} + +static int +nv50_wndw_get_scanout_buffer(struct drm_plane *plane, struct drm_scanout_buffer *sb) +{ + struct drm_framebuffer *fb; + struct nouveau_bo *nvbo; + struct nouveau_drm *drm = nouveau_drm(plane->dev); + u16 chipset = drm->client.device.info.chipset; + u8 family = drm->client.device.info.family; + u32 tile_mode; + u8 kind; + + if (!plane->state || !plane->state->fb) + return -EINVAL; + + fb = plane->state->fb; + nvbo = nouveau_gem_object(fb->obj[0]); + + /* Don't support compressed format, or multiplane yet. */ + if (nvbo->comp || fb->format->num_planes != 1) + return -EOPNOTSUPP; + + if (nouveau_bo_map(nvbo)) { + drm_warn(plane->dev, "nouveau bo map failed, panic won't be displayed\n"); + return -ENOMEM; + } + + if (nvbo->kmap.bo_kmap_type & TTM_BO_MAP_IOMEM_MASK) + iosys_map_set_vaddr_iomem(&sb->map[0], (void __iomem *)nvbo->kmap.virtual); + else + iosys_map_set_vaddr(&sb->map[0], nvbo->kmap.virtual); + + sb->height = fb->height; + sb->width = fb->width; + sb->pitch[0] = fb->pitches[0]; + sb->format = fb->format; + + nouveau_framebuffer_get_layout(fb, &tile_mode, &kind); + if (kind) { + /* If tiling is enabled, use set_pixel() to display correctly. + * Only handle 32bits format for now. + */ + if (fb->format->cpp[0] != 4) + return -EOPNOTSUPP; + nv50_panic_blk_h = nouveau_get_gob_height(family) * + nouveau_get_gobs_in_block(tile_mode, chipset); + + if (chipset >= 0x160) + sb->set_pixel = nv50_set_pixel_swizzle; + else + sb->set_pixel = nv50_set_pixel; + } + return 0; +} + static const struct drm_plane_helper_funcs nv50_wndw_helper = { .prepare_fb = nv50_wndw_prepare_fb, @@ -584,6 +698,14 @@ nv50_wndw_helper = { .atomic_check = nv50_wndw_atomic_check, }; +static const struct drm_plane_helper_funcs +nv50_wndw_primary_helper = { + .prepare_fb = nv50_wndw_prepare_fb, + .cleanup_fb = nv50_wndw_cleanup_fb, + .atomic_check = nv50_wndw_atomic_check, + .get_scanout_buffer = nv50_wndw_get_scanout_buffer, +}; + static void nv50_wndw_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) @@ -732,7 +854,10 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, return ret; } - drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper); + if (type == DRM_PLANE_TYPE_PRIMARY) + drm_plane_helper_add(&wndw->plane, &nv50_wndw_primary_helper); + else + drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper); if (wndw->func->ilut) { ret = nv50_lut_init(disp, mmu, &wndw->ilut); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 9e6f39912368..a2055f2a014a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -210,7 +210,7 @@ struct nvkm_gsp { } *rm; struct { - struct mutex mutex;; + struct mutex mutex; struct idr idr; } client_id; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index e2fd561cd23f..add006fc8d81 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -28,8 +28,8 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_event.h> #include 
<drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_probe_helper.h> @@ -44,6 +44,7 @@ #include <nvif/if0011.h> #include <nvif/if0013.h> #include <dispnv50/crc.h> +#include <dispnv50/tile.h> int nouveau_display_vblank_enable(struct drm_crtc *crtc) @@ -220,69 +221,29 @@ nouveau_validate_decode_mod(struct nouveau_drm *drm, return 0; } -static inline uint32_t -nouveau_get_width_in_blocks(uint32_t stride) -{ - /* GOBs per block in the x direction is always one, and GOBs are - * 64 bytes wide - */ - static const uint32_t log_block_width = 6; - - return (stride + (1 << log_block_width) - 1) >> log_block_width; -} - -static inline uint32_t -nouveau_get_height_in_blocks(struct nouveau_drm *drm, - uint32_t height, - uint32_t log_block_height_in_gobs) -{ - uint32_t log_gob_height; - uint32_t log_block_height; - - BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA); - - if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) - log_gob_height = 2; - else - log_gob_height = 3; - - log_block_height = log_block_height_in_gobs + log_gob_height; - - return (height + (1 << log_block_height) - 1) >> log_block_height; -} - static int nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo, uint32_t offset, uint32_t stride, uint32_t h, uint32_t tile_mode) { - uint32_t gob_size, bw, bh; + uint32_t gob_size, bw, bh, gobs_in_block; uint64_t bl_size; BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA); - if (drm->client.device.info.chipset >= 0xc0) { - if (tile_mode & 0xF) - return -EINVAL; - tile_mode >>= 4; - } - - if (tile_mode & 0xFFFFFFF0) + if (nouveau_check_tile_mode(tile_mode, drm->client.device.info.chipset)) return -EINVAL; - if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) - gob_size = 256; - else - gob_size = 512; - + gobs_in_block = nouveau_get_gobs_in_block(tile_mode, drm->client.device.info.chipset); bw = nouveau_get_width_in_blocks(stride); - bh = nouveau_get_height_in_blocks(drm, h, tile_mode); + bh = nouveau_get_height_in_blocks(h, gobs_in_block, drm->client.device.info.family); + gob_size = nouveau_get_gob_size(drm->client.device.info.family); - bl_size = bw * bh * (1 << tile_mode) * gob_size; + bl_size = bw * bh * gobs_in_block * gob_size; - DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n", - offset, stride, h, tile_mode, bw, bh, gob_size, bl_size, - nvbo->bo.base.size); + DRM_DEBUG_KMS("offset=%u stride=%u h=%u gobs_in_block=%u bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n", + offset, stride, h, gobs_in_block, bw, bh, gob_size, + bl_size, nvbo->bo.base.size); if (bl_size + offset > nvbo->bo.base.size) return -ERANGE; @@ -804,8 +765,7 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime) { struct nouveau_display *disp = nouveau_display(dev); - /* Disable console. */ - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, true); + drm_client_dev_suspend(dev, false); if (drm_drv_uses_atomic_modeset(dev)) { if (!runtime) { @@ -836,8 +796,7 @@ nouveau_display_resume(struct drm_device *dev, bool runtime) } } - /* Enable console. 
*/ - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, false); + drm_client_dev_resume(dev, false); } int diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 1f2d649f4b96..1a072568cef6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -193,7 +193,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf) if (!spage || !(src & MIGRATE_PFN_MIGRATE)) goto done; - dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address); + dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address); if (!dpage) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 84d692d68817..107f63f08bd9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -22,6 +22,7 @@ * Authors: Ben Skeggs */ +#include <linux/aperture.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/pci.h> @@ -30,7 +31,6 @@ #include <linux/mmu_notifier.h> #include <linux/dynamic_debug.h> -#include <drm/drm_aperture.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> @@ -332,7 +332,7 @@ nouveau_accel_ce_init(struct nouveau_drm *drm) return; } - ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->cechan); + ret = nouveau_channel_new(&drm->client, true, runm, NvDmaFB, NvDmaTT, &drm->cechan); if (ret) NV_ERROR(drm, "failed to create ce channel, %d\n", ret); } @@ -851,7 +851,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev, return ret; /* Remove conflicting drivers (vesafb, efifb etc). */ - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver_pci); + ret = aperture_remove_conflicting_pci_devices(pdev, driver_pci.name); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c index 829fdc6e4031..a5ce8eb4a3be 100644 --- a/drivers/gpu/drm/nouveau/nouveau_platform.c +++ b/drivers/gpu/drm/nouveau/nouveau_platform.c @@ -86,5 +86,5 @@ struct platform_driver nouveau_platform_driver = { .of_match_table = of_match_ptr(nouveau_platform_match), }, .probe = nouveau_platform_probe, - .remove_new = nouveau_platform_remove, + .remove = nouveau_platform_remove, }; diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index ab4e11dc0b8a..a6c375a24154 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -2,7 +2,7 @@ #include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_client_event.h> #include "nouveau_drv.h" #include "nouveau_acpi.h" diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c index 027867c2a8c5..99110ab2f44d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c @@ -992,7 +992,7 @@ r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8 ctrl->data = data; ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret == -EAGAIN && ctrl->retryTimeMs) { + if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) { /* * Device (likely an eDP panel) isn't ready yet, wait for the time specified * by GSP before retrying again @@ -1060,33 +1060,44 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize) NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl; u8 size = 
*psize; int ret; + int retries; - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); + for (retries = 0; retries < 3; ++retries) { + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); - ctrl->subDeviceInstance = 0; - ctrl->displayId = BIT(outp->index); - ctrl->bAddrOnly = !size; - ctrl->cmd = type; - if (ctrl->bAddrOnly) { - ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE); - ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE); - } - ctrl->addr = addr; - ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0; - memcpy(ctrl->data, data, size); + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(outp->index); + ctrl->bAddrOnly = !size; + ctrl->cmd = type; + if (ctrl->bAddrOnly) { + ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE); + ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE); + } + ctrl->addr = addr; + ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0; + memcpy(ctrl->data, data, size); - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) { + /* + * Device (likely an eDP panel) isn't ready yet, wait for the time specified + * by GSP before retrying again + */ + nvkm_debug(&disp->engine.subdev, + "Waiting %dms for GSP LT panel delay before retrying in AUX\n", + ctrl->retryTimeMs); + msleep(ctrl->retryTimeMs); + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + } else { + memcpy(data, ctrl->data, size); + *psize = ctrl->size; + ret = ctrl->replyType; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + break; + } } - - memcpy(data, ctrl->data, size); - *psize = ctrl->size; - ret = ctrl->replyType; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); return ret; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 060c74a80eb1..3ea447f6a45b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -443,6 +443,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch, ret = gf100_grctx_generate(gr, chan, fifoch->inst); if (ret) { nvkm_error(&base->engine.subdev, "failed to construct context\n"); + mutex_unlock(&gr->fecs.mutex); return ret; } } diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c index a1c8545f1249..cac6d64ab67d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c @@ -89,11 +89,6 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user, nvkm_falcon_fw_dtor_sigs(fw); } - /* after last write to the img, sync dma mappings */ - dma_sync_single_for_device(fw->fw.device->dev, - fw->fw.phys, - sg_dma_len(&fw->fw.mem.sgl), - DMA_TO_DEVICE); FLCNFW_DBG(fw, "resetting"); fw->func->reset(fw); @@ -105,6 +100,12 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user, goto done; } + /* after last write to the img, sync dma mappings */ + dma_sync_single_for_device(fw->fw.device->dev, + fw->fw.phys, + sg_dma_len(&fw->fw.mem.sgl), + DMA_TO_DEVICE); + ret = fw->func->load(fw); if (ret) goto done; diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index cf58f9da9139..d586aea30898 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -78,7 +78,7 @@ r535_rpc_status_to_errno(uint32_t rpc_status) switch (rpc_status) { case 0x55: /* NV_ERR_NOT_READY */ case 0x66: /* NV_ERR_TIMEOUT_RETRY */ - return -EAGAIN; + return -EBUSY; case 0x51: /* NV_ERR_NO_MEMORY */ return -ENOMEM; default: @@ -601,7 +601,7 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc) if (rpc->status) { ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status)); - if (PTR_ERR(ret) != -EAGAIN) + if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY) nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status); } else { ret = repc ? rpc->params : NULL; @@ -660,7 +660,7 @@ r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc) if (rpc->status) { ret = r535_rpc_status_to_errno(rpc->status); - if (ret != -EAGAIN) + if (ret != -EAGAIN && ret != -EBUSY) nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n", object->client->object.handle, object->handle, rpc->cmd, rpc->status); } diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 993691b3cc7e..533f70e8a4a6 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -691,11 +691,6 @@ u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc, return mgr_desc[channel].sync_lost_irq; } -u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc) -{ - return DISPC_IRQ_FRAMEDONEWB; -} - void dispc_mgr_enable(struct dispc_device *dispc, enum omap_channel channel, bool enable) { @@ -726,30 +721,6 @@ void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel) mgr_fld_write(dispc, channel, DISPC_MGR_FLD_GO, 1); } -bool dispc_wb_go_busy(struct dispc_device *dispc) -{ - return REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1; -} - -void dispc_wb_go(struct dispc_device *dispc) -{ - enum omap_plane_id plane = OMAP_DSS_WB; - bool enable, go; - - enable = REG_GET(dispc, DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1; - - if (!enable) - return; - - go = REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1; - if (go) { - DSSERR("GO bit not down for WB\n"); - return; - } - - REG_FLD_MOD(dispc, DISPC_CONTROL2, 1, 6, 6); -} - static void dispc_ovl_write_firh_reg(struct dispc_device *dispc, enum omap_plane_id plane, int reg, u32 value) @@ -1498,17 +1469,6 @@ void dispc_ovl_set_fifo_threshold(struct dispc_device *dispc, min(high, 0xfffu)); } -void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable) -{ - if (!dispc_has_feature(dispc, FEAT_FIFO_MERGE)) { - WARN_ON(enable); - return; - } - - DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled"); - REG_FLD_MOD(dispc, DISPC_CONFIG, enable ? 
1 : 0, 14, 14); -} - void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc, enum omap_plane_id plane, u32 *fifo_low, u32 *fifo_high, @@ -2814,95 +2774,6 @@ int dispc_ovl_setup(struct dispc_device *dispc, return r; } -int dispc_wb_setup(struct dispc_device *dispc, - const struct omap_dss_writeback_info *wi, - bool mem_to_mem, const struct videomode *vm, - enum dss_writeback_channel channel_in) -{ - int r; - u32 l; - enum omap_plane_id plane = OMAP_DSS_WB; - const int pos_x = 0, pos_y = 0; - const u8 zorder = 0, global_alpha = 0; - const bool replication = true; - bool truncation; - int in_width = vm->hactive; - int in_height = vm->vactive; - enum omap_overlay_caps caps = - OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA; - - if (vm->flags & DISPLAY_FLAGS_INTERLACED) - in_height /= 2; - - DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, " - "rot %d\n", wi->paddr, wi->p_uv_addr, in_width, - in_height, wi->width, wi->height, wi->fourcc, wi->rotation); - - r = dispc_ovl_setup_common(dispc, plane, caps, wi->paddr, wi->p_uv_addr, - wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width, - wi->height, wi->fourcc, wi->rotation, zorder, - wi->pre_mult_alpha, global_alpha, wi->rotation_type, - replication, vm, mem_to_mem, DRM_COLOR_YCBCR_BT601, - DRM_COLOR_YCBCR_LIMITED_RANGE); - if (r) - return r; - - switch (wi->fourcc) { - case DRM_FORMAT_RGB565: - case DRM_FORMAT_RGB888: - case DRM_FORMAT_ARGB4444: - case DRM_FORMAT_RGBA4444: - case DRM_FORMAT_RGBX4444: - case DRM_FORMAT_ARGB1555: - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_XRGB4444: - truncation = true; - break; - default: - truncation = false; - break; - } - - /* setup extra DISPC_WB_ATTRIBUTES */ - l = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane)); - l = FLD_MOD(l, truncation, 10, 10); /* TRUNCATIONENABLE */ - l = FLD_MOD(l, channel_in, 18, 16); /* CHANNELIN */ - l = FLD_MOD(l, mem_to_mem, 19, 19); /* WRITEBACKMODE */ - if (mem_to_mem) - l = FLD_MOD(l, 1, 26, 24); /* CAPTUREMODE */ - else - l = FLD_MOD(l, 0, 26, 24); /* CAPTUREMODE */ - dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), l); - - if (mem_to_mem) { - /* WBDELAYCOUNT */ - REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0); - } else { - u32 wbdelay; - - if (channel_in == DSS_WB_TV_MGR) - wbdelay = vm->vsync_len + vm->vback_porch; - else - wbdelay = vm->vfront_porch + vm->vsync_len + - vm->vback_porch; - - if (vm->flags & DISPLAY_FLAGS_INTERLACED) - wbdelay /= 2; - - wbdelay = min(wbdelay, 255u); - - /* WBDELAYCOUNT */ - REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0); - } - - return 0; -} - -bool dispc_has_writeback(struct dispc_device *dispc) -{ - return dispc->feat->has_writeback; -} - int dispc_ovl_enable(struct dispc_device *dispc, enum omap_plane_id plane, bool enable) { @@ -3742,23 +3613,6 @@ void dispc_mgr_set_clock_div(struct dispc_device *dispc, cinfo->pck_div); } -int dispc_mgr_get_clock_div(struct dispc_device *dispc, - enum omap_channel channel, - struct dispc_clock_info *cinfo) -{ - unsigned long fck; - - fck = dispc_fclk_rate(dispc); - - cinfo->lck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 23, 16); - cinfo->pck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 7, 0); - - cinfo->lck = fck / cinfo->lck_div; - cinfo->pck = cinfo->lck / cinfo->pck_div; - - return 0; -} - u32 dispc_read_irqstatus(struct dispc_device *dispc) { return dispc_read_reg(dispc, DISPC_IRQSTATUS); @@ -4912,7 +4766,7 @@ static const struct dev_pm_ops dispc_pm_ops = { struct platform_driver omap_dispchw_driver = { .probe = 
dispc_probe, - .remove_new = dispc_remove, + .remove = dispc_remove, .driver = { .name = "omapdss_dispc", .pm = &dispc_pm_ops, diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 030f997eccd0..b17e77f700dd 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -16,6 +16,7 @@ #include <linux/export.h> #include <linux/kernel.h> #include <linux/of.h> +#include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/string.h> @@ -709,7 +710,7 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev, if (!dpi) return -ENOMEM; - ep = of_get_next_child(port, NULL); + ep = of_graph_get_next_port_endpoint(port, NULL); if (!ep) return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index ea63c64d3a1a..59d20eb8a7e0 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -5093,7 +5093,7 @@ static const struct dev_pm_ops dsi_pm_ops = { struct platform_driver omap_dsihw_driver = { .probe = dsi_probe, - .remove_new = dsi_remove, + .remove = dsi_remove, .driver = { .name = "omapdss_dsi", .pm = &dsi_pm_ops, diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 988888e164d7..7b2df3185de4 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1606,7 +1606,7 @@ static const struct dev_pm_ops dss_pm_ops = { struct platform_driver omap_dsshw_driver = { .probe = dss_probe, - .remove_new = dss_remove, + .remove = dss_remove, .shutdown = dss_shutdown, .driver = { .name = "omapdss_dss", diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index 4ff02fbc0e71..a8b231ed4f4b 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -416,7 +416,6 @@ u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc, enum omap_channel channel); u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc, enum omap_channel channel); -u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc); u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc); @@ -458,20 +457,11 @@ int dispc_ovl_setup(struct dispc_device *dispc, int dispc_ovl_enable(struct dispc_device *dispc, enum omap_plane_id plane, bool enable); -bool dispc_has_writeback(struct dispc_device *dispc); -int dispc_wb_setup(struct dispc_device *dispc, - const struct omap_dss_writeback_info *wi, - bool mem_to_mem, const struct videomode *vm, - enum dss_writeback_channel channel_in); -bool dispc_wb_go_busy(struct dispc_device *dispc); -void dispc_wb_go(struct dispc_device *dispc); - void dispc_enable_sidle(struct dispc_device *dispc); void dispc_disable_sidle(struct dispc_device *dispc); void dispc_lcd_enable_signal(struct dispc_device *dispc, bool enable); void dispc_pck_free_enable(struct dispc_device *dispc, bool enable); -void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable); typedef bool (*dispc_div_calc_func)(int lckd, int pckd, unsigned long lck, unsigned long pck, void *data); @@ -494,9 +484,6 @@ void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc, void dispc_mgr_set_clock_div(struct dispc_device *dispc, enum omap_channel channel, const struct dispc_clock_info *cinfo); -int dispc_mgr_get_clock_div(struct dispc_device *dispc, - enum omap_channel channel, - struct dispc_clock_info *cinfo); void dispc_set_tv_pclk(struct dispc_device *dispc, unsigned long pclk); #ifdef 
CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 9b8747d83ee8..4435f0027c78 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -852,7 +852,7 @@ static const struct of_device_id hdmi_of_match[] = { struct platform_driver omapdss_hdmi4hw_driver = { .probe = hdmi4_probe, - .remove_new = hdmi4_remove, + .remove = hdmi4_remove, .driver = { .name = "omapdss_hdmi", .of_match_table = hdmi_of_match, diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index c7ae2235ae99..a8c740df3146 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -819,7 +819,7 @@ static const struct of_device_id hdmi_of_match[] = { struct platform_driver omapdss_hdmi5hw_driver = { .probe = hdmi5_probe, - .remove_new = hdmi5_remove, + .remove = hdmi5_remove, .driver = { .name = "omapdss_hdmi5", .of_match_table = hdmi_of_match, diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 91eaae3b9481..f9ae358e8e52 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -11,6 +11,7 @@ #include <linux/export.h> #include <linux/kernel.h> #include <linux/of.h> +#include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/string.h> @@ -346,7 +347,7 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev, if (!sdi) return -ENOMEM; - ep = of_get_next_child(port, NULL); + ep = of_graph_get_next_port_endpoint(port, NULL); if (!ep) { r = 0; goto err_free; diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index f163d52a7c7d..aaeef603682c 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -912,7 +912,7 @@ static const struct of_device_id venc_of_match[] = { struct platform_driver omap_venchw_driver = { .probe = venc_probe, - .remove_new = venc_remove, + .remove = venc_remove, .driver = { .name = "omapdss_venc", .pm = &venc_pm_ops, diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index fcd600024136..3fff32c000a6 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -1210,7 +1210,7 @@ static const struct of_device_id dmm_of_match[] = { struct platform_driver omap_dmm_driver = { .probe = omap_dmm_probe, - .remove_new = omap_dmm_remove, + .remove = omap_dmm_remove, .driver = { .name = DMM_DRIVER_NAME, .of_match_table = of_match_ptr(dmm_of_match), diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 1796cd20a877..e27376121606 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -857,7 +857,7 @@ static struct platform_driver pdev = { .pm = &omapdrm_pm_ops, }, .probe = pdev_probe, - .remove_new = pdev_remove, + .remove = pdev_remove, .shutdown = pdev_shutdown, }; diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index 36f9ee4baad3..30cf1cdc1aa3 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -11,7 +11,7 @@ #include "omap_drv.h" -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); /* ----------------------------------------------------------------------------- * DMABUF Export diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 
ddfaa99ea9dd..d7469c565d1d 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -614,6 +614,15 @@ config DRM_PANEL_RONBO_RB070D30 Say Y here if you want to enable support for Ronbo Electronics RB070D30 1024x600 DSI panel. +config DRM_PANEL_SAMSUNG_AMS581VF01 + tristate "Samsung AMS581VF01 panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y or M here if you want to enable support for the + Samsung AMS581VF01 FHD Plus (2340x1080@60Hz) CMD mode panel. + config DRM_PANEL_SAMSUNG_AMS639RQ08 tristate "Samsung AMS639RQ08 panel" depends on OF @@ -623,6 +632,16 @@ config DRM_PANEL_SAMSUNG_AMS639RQ08 Say Y or M here if you want to enable support for the Samsung AMS639RQ08 FHD Plus (2340x1080@60Hz) CMD mode panel. +config DRM_PANEL_SAMSUNG_S6E88A0_AMS427AP24 + tristate "Samsung AMS427AP24 panel with S6E88A0 controller" + depends on GPIOLIB && OF && REGULATOR + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for Samsung AMS427AP24 panel + with S6E88A0 controller (found in Samsung Galaxy S4 Mini Value Edition + GT-I9195I). To compile this driver as a module, choose M here. + config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller" depends on OF @@ -703,6 +722,7 @@ config DRM_PANEL_SAMSUNG_S6E3HA8 depends on OF depends on DRM_MIPI_DSI depends on BACKLIGHT_CLASS_DEVICE + select DRM_DISPLAY_DSC_HELPER select VIDEOMODE_HELPERS config DRM_PANEL_SAMSUNG_S6E63J0X03 diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 4b5eaf111676..7dcf72646cac 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -62,6 +62,7 @@ obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM692E5) += panel-raydium-rm692e5.o obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM69380) += panel-raydium-rm69380.o obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS581VF01) += panel-samsung-ams581vf01.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS639RQ08) += panel-samsung-ams639rq08.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o @@ -76,6 +77,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_SPI) += panel-samsung-s6e63m0-spi.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI) += panel-samsung-s6e63m0-dsi.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS427AP24) += panel-samsung-s6e88a0-ams427ap24.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_SOFEF00) += panel-samsung-sofef00.o diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 767e47a2b0c1..94a46241dece 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1977,11 +1977,13 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('L', 'G', 'D', 0x0567, &delay_200_500_e200_d200, "Unknown"), EDP_PANEL_ENTRY('L', 'G', 'D', 0x05af, &delay_200_500_e200_d200, "Unknown"), EDP_PANEL_ENTRY('L', 'G', 'D', 0x05f1, &delay_200_500_e200_d200, "Unknown"), + EDP_PANEL_ENTRY('L', 'G', 'D', 0x0778, &delay_200_500_e200_d200, "134WT1"), 
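+	/*
+	 * Each EDP_PANEL_ENTRY in this table keys a panel off its EDID,
+	 * the three-letter PnP vendor ID plus the 16-bit product code,
+	 * since generic eDP panels cannot be told apart by compatible
+	 * string. A matched entry only supplies power-sequencing delays;
+	 * the delay_* names appear to encode them, e.g.
+	 * delay_200_500_e200_d200 as 200 ms HPD-absent, 500 ms unprepare,
+	 * 200 ms enable and 200 ms disable, and p2e100 as a 100 ms
+	 * prepare-to-enable window.
+	 */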
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &delay_80_500_e50, "LQ140M1JW46"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x153a, &delay_200_500_e50, "LQ140T1JH01"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"), + EDP_PANEL_ENTRY('S', 'H', 'P', 0x1593, &delay_200_500_p2e100, "LQ134N1"), EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"), @@ -2047,7 +2049,7 @@ static struct platform_driver panel_edp_platform_driver = { .pm = &panel_edp_pm_ops, }, .probe = panel_edp_platform_probe, - .remove_new = panel_edp_platform_remove, + .remove = panel_edp_platform_remove, .shutdown = panel_edp_platform_shutdown, }; diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c index 6e4b7e4644ce..8b48bba18131 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx83102.c +++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c @@ -298,7 +298,7 @@ static int ivo_t109nw41_init(struct hx83102 *ctx) msleep(60); hx83102_enable_extended_cmds(&dsi_ctx, true); - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xed, 0xed, 0x0f, 0xcf, 0x42, + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xed, 0xed, 0x27, 0xe7, 0x52, 0xf5, 0x39, 0x36, 0x36, 0x36, 0x36, 0x32, 0x8b, 0x11, 0x65, 0x00, 0x88, 0xfa, 0xff, 0xff, 0x8f, 0xff, 0x08, 0xd6, 0x33); mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x00, 0x47, 0xb0, 0x80, 0x00, 0x12, @@ -343,11 +343,11 @@ static int ivo_t109nw41_init(struct hx83102 *ctx) 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00); - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x04, 0x04, 0x06, 0x0a, 0x0a, 0x05, - 0x12, 0x14, 0x17, 0x13, 0x2c, 0x33, 0x39, 0x4b, 0x4c, 0x56, 0x61, 0x78, - 0x7a, 0x41, 0x50, 0x68, 0x73, 0x04, 0x04, 0x06, 0x0a, 0x0a, 0x05, 0x12, - 0x14, 0x17, 0x13, 0x2c, 0x33, 0x39, 0x4b, 0x4c, 0x56, 0x61, 0x78, 0x7a, - 0x41, 0x50, 0x68, 0x73); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x00, 0x07, 0x10, 0x17, 0x1c, 0x33, + 0x48, 0x50, 0x57, 0x50, 0x68, 0x6e, 0x71, 0x7f, 0x81, 0x8a, 0x8e, 0x9b, + 0x9c, 0x4d, 0x56, 0x5d, 0x73, 0x00, 0x07, 0x10, 0x17, 0x1c, 0x33, 0x48, + 0x50, 0x57, 0x50, 0x68, 0x6e, 0x71, 0x7f, 0x81, 0x8a, 0x8e, 0x9b, 0x9c, + 0x4d, 0x56, 0x5d, 0x73); mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x07, 0x10, 0x10, 0x1a, 0x26, 0x9e, 0x00, 0x4f, 0xa0, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00, 0x12, 0x0a, 0x02, 0x02, 0x00, 0x33, 0x02, 0x04, 0x18, 0x01); diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c index 4a6dcfd781e8..94b7dfef3b5e 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c @@ -318,7 +318,7 @@ static int ili9322_regmap_spi_read(void *context, const void *reg, return spi_write_then_read(spi, buf, 1, val, 1); } -static struct regmap_bus ili9322_regmap_bus = { +static const struct regmap_bus ili9322_regmap_bus = { .write = ili9322_regmap_spi_write, .read = ili9322_regmap_spi_read, .reg_format_endian_default = REGMAP_ENDIAN_BIG, diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c index 292aa26a456d..77f74e6c467e 100644 --- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c +++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c @@ -26,7 +26,7 @@ struct 
ltk050h3146w; struct ltk050h3146w_desc { const unsigned long mode_flags; const struct drm_display_mode *mode; - int (*init)(struct ltk050h3146w *ctx); + void (*init)(struct mipi_dsi_multi_context *dsi_ctx); }; struct ltk050h3146w { @@ -243,67 +243,57 @@ struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel) return container_of(panel, struct ltk050h3146w, panel); } -static int ltk050h3148w_init_sequence(struct ltk050h3146w *ctx) +static void ltk050h3148w_init_sequence(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; - /* * Init sequence was supplied by the panel vendor without much * documentation. */ - mipi_dsi_dcs_write_seq(dsi, 0xb9, 0xff, 0x83, 0x94); - mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x50, 0x15, 0x75, 0x09, 0x32, 0x44, - 0x71, 0x31, 0x55, 0x2f); - mipi_dsi_dcs_write_seq(dsi, 0xba, 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); - mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x88); - mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x80, 0x64, 0x10, 0x07); - mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x05, 0x70, 0x05, 0x70, 0x01, 0x70, - 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f, 0x01, 0x74, - 0x01, 0x74, 0x01, 0x74, 0x01, 0x0c, 0x86); - mipi_dsi_dcs_write_seq(dsi, 0xd3, 0x00, 0x00, 0x07, 0x07, 0x40, 0x1e, - 0x08, 0x00, 0x32, 0x10, 0x08, 0x00, 0x08, 0x54, - 0x15, 0x10, 0x05, 0x04, 0x02, 0x12, 0x10, 0x05, - 0x07, 0x33, 0x34, 0x0c, 0x0c, 0x37, 0x10, 0x07, - 0x17, 0x11, 0x40); - mipi_dsi_dcs_write_seq(dsi, 0xd5, 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, - 0x1a, 0x1a, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, - 0x02, 0x03, 0x20, 0x21, 0x18, 0x18, 0x22, 0x23, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); - mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, - 0x1a, 0x1a, 0x03, 0x02, 0x01, 0x00, 0x07, 0x06, - 0x05, 0x04, 0x23, 0x22, 0x18, 0x18, 0x21, 0x20, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); - mipi_dsi_dcs_write_seq(dsi, 0xe0, 0x00, 0x03, 0x09, 0x11, 0x11, 0x14, - 0x18, 0x16, 0x2e, 0x3d, 0x4d, 0x4d, 0x58, 0x6c, - 0x72, 0x78, 0x88, 0x8b, 0x86, 0xa4, 0xb2, 0x58, - 0x55, 0x59, 0x5b, 0x5d, 0x60, 0x64, 0x7f, 0x00, - 0x03, 0x09, 0x0f, 0x11, 0x14, 0x18, 0x16, 0x2e, - 0x3d, 0x4d, 0x4d, 0x58, 0x6d, 0x73, 0x78, 0x88, - 0x8b, 0x87, 0xa5, 0xb2, 0x58, 0x55, 0x58, 0x5b, - 0x5d, 0x61, 0x65, 0x7f); - mipi_dsi_dcs_write_seq(dsi, 0xcc, 0x0b); - mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x1f, 0x31); - mipi_dsi_dcs_write_seq(dsi, 0xb6, 0xc4, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xc6, 0xef); - mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x02); - - ret = mipi_dsi_dcs_set_tear_on(dsi, 1); - if (ret < 0) { - dev_err(ctx->dev, "failed to set tear on: %d\n", ret); - return ret; - } - - msleep(60); - - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb9, 0xff, 0x83, 0x94); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb1, 0x50, 0x15, 0x75, 0x09, 0x32, 0x44, + 0x71, 0x31, 0x55, 0x2f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xba, 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd2, 0x88); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x00, 0x80, 0x64, 0x10, 0x07); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb4, 0x05, 0x70, 0x05, 0x70, 0x01, 0x70, + 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f, 0x01, 0x74, + 0x01, 0x74, 0x01, 0x74, 0x01, 0x0c, 0x86); + 
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd3, 0x00, 0x00, 0x07, 0x07, 0x40, 0x1e, + 0x08, 0x00, 0x32, 0x10, 0x08, 0x00, 0x08, 0x54, + 0x15, 0x10, 0x05, 0x04, 0x02, 0x12, 0x10, 0x05, + 0x07, 0x33, 0x34, 0x0c, 0x0c, 0x37, 0x10, 0x07, + 0x17, 0x11, 0x40); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd5, 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, + 0x1a, 0x1a, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, + 0x02, 0x03, 0x20, 0x21, 0x18, 0x18, 0x22, 0x23, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd6, 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, + 0x1a, 0x1a, 0x03, 0x02, 0x01, 0x00, 0x07, 0x06, + 0x05, 0x04, 0x23, 0x22, 0x18, 0x18, 0x21, 0x20, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xe0, 0x00, 0x03, 0x09, 0x11, 0x11, 0x14, + 0x18, 0x16, 0x2e, 0x3d, 0x4d, 0x4d, 0x58, 0x6c, + 0x72, 0x78, 0x88, 0x8b, 0x86, 0xa4, 0xb2, 0x58, + 0x55, 0x59, 0x5b, 0x5d, 0x60, 0x64, 0x7f, 0x00, + 0x03, 0x09, 0x0f, 0x11, 0x14, 0x18, 0x16, 0x2e, + 0x3d, 0x4d, 0x4d, 0x58, 0x6d, 0x73, 0x78, 0x88, + 0x8b, 0x87, 0xa5, 0xb2, 0x58, 0x55, 0x58, 0x5b, + 0x5d, 0x61, 0x65, 0x7f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xcc, 0x0b); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x1f, 0x31); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb6, 0xc4, 0xc4); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbd, 0x01); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb1, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbd, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc6, 0xef); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd4, 0x02); + + mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 1); + mipi_dsi_msleep(dsi_ctx, 60); } static const struct drm_display_mode ltk050h3148w_mode = { @@ -327,74 +317,64 @@ static const struct ltk050h3146w_desc ltk050h3148w_data = { MIPI_DSI_MODE_VIDEO_BURST, }; -static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx) +static void ltk050h3146w_init_sequence(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; - /* * Init sequence was supplied by the panel vendor without much * documentation. 
*/ - mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8); - mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06, - 0x01); - mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5); - mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5); - mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00); - - mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07); - mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f, - 0x28, 0x04, 0xcc, 0xcc, 0xcc); - mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2); - mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03); - mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12); - mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80, - 0x80); - mipi_dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f, - 0x16, 0x00, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50, - 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f, - 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67, - 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55, - 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a, - 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f, - 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); - mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b, - 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f, - 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); - mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05, - 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f, - 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); - mipi_dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04, - 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f, - 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); - mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20, - 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03, - 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05, - 0x21, 0x00, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xde, 0x02); - mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c); - mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x11); - mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37); - mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84); - mipi_dsi_dcs_write_seq(dsi, 0xde, 0x00); - - ret = mipi_dsi_dcs_set_tear_on(dsi, 1); - if (ret < 0) { - dev_err(ctx->dev, "failed to set tear on: %d\n", ret); - return ret; - } - - msleep(60); - - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xdf, 0x93, 0x65, 0xf8); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06, + 0x01); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x00, 0xb5); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb3, 0x00, 0xb5); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00); + + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb9, 0x00, 0xc4, 0x23, 0x07); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f, + 0x28, 0x04, 0xcc, 0xcc, 0xcc); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x0f, 0x04); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbe, 0x1e, 0xf2); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x26, 0x03); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x00, 0x12); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc3, 
0x04, 0x02, 0x02, 0x76, 0x01, 0x80, + 0x80); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f, + 0x16, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50, + 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f, + 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67, + 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55, + 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a, + 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b, + 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05, + 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04, + 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20, + 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03, + 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05, + 0x21, 0x00, 0x60); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xdd, 0x2c, 0xa3, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xde, 0x02); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x32, 0x1c); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb7, 0x3b, 0x70, 0x00, 0x04); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x11); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc2, 0x20, 0x38, 0x1e, 0x84); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xde, 0x00); + + mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 1); + mipi_dsi_msleep(dsi_ctx, 60); } static const struct drm_display_mode ltk050h3146w_mode = { @@ -418,79 +398,42 @@ static const struct ltk050h3146w_desc ltk050h3146w_data = { MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, }; -static int ltk050h3146w_a2_select_page(struct ltk050h3146w *ctx, int page) +static void ltk050h3146w_a2_select_page(struct mipi_dsi_multi_context *dsi_ctx, int page) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - u8 d[3] = { 0x98, 0x81, page }; + u8 d[4] = { 0xff, 0x98, 0x81, page }; - return mipi_dsi_dcs_write(dsi, 0xff, d, ARRAY_SIZE(d)); + mipi_dsi_dcs_write_buffer_multi(dsi_ctx, d, ARRAY_SIZE(d)); } -static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page, +static void ltk050h3146w_a2_write_page(struct mipi_dsi_multi_context *dsi_ctx, int page, const struct ltk050h3146w_cmd *cmds, int num) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int i, ret; + ltk050h3146w_a2_select_page(dsi_ctx, page); - ret = ltk050h3146w_a2_select_page(ctx, page); - if (ret < 0) { - dev_err(ctx->dev, "failed to select page %d: %d\n", page, ret); - return ret; - } - - for (i = 0; i < num; i++) { - ret = mipi_dsi_generic_write(dsi, &cmds[i], + for (int i = 0; i < num; i++) + mipi_dsi_generic_write_multi(dsi_ctx, &cmds[i], sizeof(struct ltk050h3146w_cmd)); - if (ret < 0) { - dev_err(ctx->dev, "failed to write page %d init cmds: %d\n", page, ret); - return ret; - } - } - - return 0; } -static int 
ltk050h3146w_a2_init_sequence(struct ltk050h3146w *ctx) +static void ltk050h3146w_a2_init_sequence(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; - /* * Init sequence was supplied by the panel vendor without much * documentation. */ - ret = ltk050h3146w_a2_write_page(ctx, 3, page3_cmds, + ltk050h3146w_a2_write_page(dsi_ctx, 3, page3_cmds, ARRAY_SIZE(page3_cmds)); - if (ret < 0) - return ret; - - ret = ltk050h3146w_a2_write_page(ctx, 4, page4_cmds, + ltk050h3146w_a2_write_page(dsi_ctx, 4, page4_cmds, ARRAY_SIZE(page4_cmds)); - if (ret < 0) - return ret; - - ret = ltk050h3146w_a2_write_page(ctx, 1, page1_cmds, + ltk050h3146w_a2_write_page(dsi_ctx, 1, page1_cmds, ARRAY_SIZE(page1_cmds)); - if (ret < 0) - return ret; - - ret = ltk050h3146w_a2_select_page(ctx, 0); - if (ret < 0) { - dev_err(ctx->dev, "failed to select page 0: %d\n", ret); - return ret; - } + ltk050h3146w_a2_select_page(dsi_ctx, 0); /* vendor code called this without param, where there should be one */ - ret = mipi_dsi_dcs_set_tear_on(dsi, 0); - if (ret < 0) { - dev_err(ctx->dev, "failed to set tear on: %d\n", ret); - return ret; - } - - msleep(60); + mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 0); - return 0; + mipi_dsi_msleep(dsi_ctx, 60); } static const struct drm_display_mode ltk050h3146w_a2_mode = { @@ -518,19 +461,12 @@ static int ltk050h3146w_unprepare(struct drm_panel *panel) { struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(ctx->dev, "failed to set display off: %d\n", ret); - return ret; - } - - mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + if (dsi_ctx.accum_err) + return dsi_ctx.accum_err; regulator_disable(ctx->iovcc); regulator_disable(ctx->vci); @@ -542,17 +478,17 @@ static int ltk050h3146w_prepare(struct drm_panel *panel) { struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dev_dbg(ctx->dev, "Resetting the panel\n"); - ret = regulator_enable(ctx->vci); - if (ret < 0) { - dev_err(ctx->dev, "Failed to enable vci supply: %d\n", ret); - return ret; + dsi_ctx.accum_err = regulator_enable(ctx->vci); + if (dsi_ctx.accum_err) { + dev_err(ctx->dev, "Failed to enable vci supply: %d\n", dsi_ctx.accum_err); + return dsi_ctx.accum_err; } - ret = regulator_enable(ctx->iovcc); - if (ret < 0) { - dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret); + dsi_ctx.accum_err = regulator_enable(ctx->iovcc); + if (dsi_ctx.accum_err) { + dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", dsi_ctx.accum_err); goto disable_vci; } @@ -561,28 +497,15 @@ static int ltk050h3146w_prepare(struct drm_panel *panel) gpiod_set_value_cansleep(ctx->reset_gpio, 0); msleep(20); - ret = ctx->panel_desc->init(ctx); - if (ret < 0) { - dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret); - goto disable_iovcc; - } - - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret); - goto disable_iovcc; - } - + ctx->panel_desc->init(&dsi_ctx); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); /* T9: 120ms 
*/ - msleep(120); + mipi_dsi_msleep(&dsi_ctx, 120); + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 50); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(ctx->dev, "Failed to set display on: %d\n", ret); + if (dsi_ctx.accum_err) goto disable_iovcc; - } - - msleep(50); return 0; @@ -590,7 +513,7 @@ disable_iovcc: regulator_disable(ctx->iovcc); disable_vci: regulator_disable(ctx->vci); - return ret; + return dsi_ctx.accum_err; } static int ltk050h3146w_get_modes(struct drm_panel *panel, diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c index 1b8e3156914c..ba6c015aabba 100644 --- a/drivers/gpu/drm/panel/panel-lvds.c +++ b/drivers/gpu/drm/panel/panel-lvds.c @@ -246,7 +246,7 @@ MODULE_DEVICE_TABLE(of, panel_lvds_of_table); static struct platform_driver panel_lvds_driver = { .probe = panel_lvds_probe, - .remove_new = panel_lvds_remove, + .remove = panel_lvds_remove, .driver = { .name = "panel-lvds", .of_match_table = panel_lvds_of_table, diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c index d3baccfe6286..06e16a7c14a7 100644 --- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c +++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c @@ -917,7 +917,7 @@ static const struct nv3052c_panel_info wl_355608_a8_panel_info = { static const struct spi_device_id nv3052c_ids[] = { { "ltk035c5444t", }, { "fs035vg158", }, - { "wl-355608-a8", }, + { "rg35xx-plus-panel", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(spi, nv3052c_ids); diff --git a/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c new file mode 100644 index 000000000000..cf6186312252 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com> + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +/* Manufacturer Command Set */ +#define MCS_ACCESS_PROT_OFF 0xb0 +#define MCS_PASSWD 0xf0 + +struct ams581vf01 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct gpio_desc *reset_gpio; + struct regulator_bulk_data *supplies; +}; + +static const struct regulator_bulk_data ams581vf01_supplies[] = { + { .supply = "vdd3p3" }, + { .supply = "vddio" }, + { .supply = "vsn" }, + { .supply = "vsp" }, +}; + +static inline struct ams581vf01 *to_ams581vf01(struct drm_panel *panel) +{ + return container_of(panel, struct ams581vf01, panel); +} + +static void ams581vf01_reset(struct ams581vf01 *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(10000, 11000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); +} + +static int ams581vf01_on(struct ams581vf01 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + /* Sleep Out, Wait 10ms */ + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + usleep_range(10000, 11000); + + /* TE On */ + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + + /* MIC Setting */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0x5a, 0x5a); /* Unlock */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 
0xeb, 0x17, + 0x41, 0x92, + 0x0e, 0x10, + 0x82, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0xa5, 0xa5); /* Lock */ + + /* Column & Page Address Setting */ + mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 0x0437); + mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x0923); + + /* Brightness Setting */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); + + /* Horizontal & Vertical sync Setting */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0x5a, 0x5a); /* Unlock */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x09); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe8, 0x11, 0x30); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0xa5, 0xa5); /* Lock */ + mipi_dsi_msleep(&dsi_ctx, 110); + + /* Display On */ + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + + return dsi_ctx.accum_err; +} + +static void ams581vf01_off(struct ams581vf01 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + /* Display Off & Sleep In */ + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + + /* VCI operating mode change */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0x5a, 0x5a); /* Unlock */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0xa5, 0xa5); /* Lock */ + + mipi_dsi_msleep(&dsi_ctx, 120); +} + +static int ams581vf01_prepare(struct drm_panel *panel) +{ + struct ams581vf01 *ctx = to_ams581vf01(panel); + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(ams581vf01_supplies), + ctx->supplies); + if (ret < 0) + return ret; + + ams581vf01_reset(ctx); + + ret = ams581vf01_on(ctx); + if (ret < 0) { + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(ams581vf01_supplies), + ctx->supplies); + return ret; + } + + return 0; +} + +static int ams581vf01_unprepare(struct drm_panel *panel) +{ + struct ams581vf01 *ctx = to_ams581vf01(panel); + + ams581vf01_off(ctx); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(ams581vf01_supplies), + ctx->supplies); + + return 0; +} + +static const struct drm_display_mode ams581vf01_mode = { + .clock = (1080 + 32 + 73 + 98) * (2340 + 8 + 1 + 8) * 60 / 1000, + .hdisplay = 1080, + .hsync_start = 1080 + 32, + .hsync_end = 1080 + 32 + 73, + .htotal = 1080 + 32 + 73 + 98, + .vdisplay = 2340, + .vsync_start = 2340 + 8, + .vsync_end = 2340 + 8 + 1, + .vtotal = 2340 + 8 + 1 + 8, + .width_mm = 62, + .height_mm = 134, + .type = DRM_MODE_TYPE_DRIVER, +}; + +static int ams581vf01_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, &ams581vf01_mode); +} + +static const struct drm_panel_funcs ams581vf01_panel_funcs = { + .prepare = ams581vf01_prepare, + .unprepare = ams581vf01_unprepare, + .get_modes = ams581vf01_get_modes, +}; + +static int ams581vf01_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness = backlight_get_brightness(bl); + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness); + if (ret < 0) + return ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return 0; +} + +static const struct backlight_ops ams581vf01_bl_ops = { + .update_status = 
ams581vf01_bl_update_status, +}; + +static struct backlight_device * +ams581vf01_create_backlight(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 511, + .max_brightness = 1023, + }; + + return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, + &ams581vf01_bl_ops, &props); +} + +static int ams581vf01_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct ams581vf01 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ret = devm_regulator_bulk_get_const(&dsi->dev, + ARRAY_SIZE(ams581vf01_supplies), + ams581vf01_supplies, + &ctx->supplies); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to get regulators\n"); + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; + + drm_panel_init(&ctx->panel, dev, &ams581vf01_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + ctx->panel.prepare_prev_first = true; + + ctx->panel.backlight = ams581vf01_create_backlight(dsi); + if (IS_ERR(ctx->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight), + "Failed to create backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) { + drm_panel_remove(&ctx->panel); + return dev_err_probe(dev, ret, "Failed to attach to DSI host\n"); + } + + return 0; +} + +static void ams581vf01_remove(struct mipi_dsi_device *dsi) +{ + struct ams581vf01 *ctx = mipi_dsi_get_drvdata(dsi); + + drm_panel_remove(&ctx->panel); +} + +static const struct of_device_id ams581vf01_of_match[] = { + { .compatible = "samsung,ams581vf01" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ams581vf01_of_match); + +static struct mipi_dsi_driver ams581vf01_driver = { + .probe = ams581vf01_probe, + .remove = ams581vf01_remove, + .driver = { + .name = "panel-samsung-ams581vf01", + .of_match_table = ams581vf01_of_match, + }, +}; +module_mipi_dsi_driver(ams581vf01_driver); + +MODULE_AUTHOR("Danila Tikhonov <danila@jiaxyga.com>"); +MODULE_DESCRIPTION("DRM driver for SAMSUNG AMS581VF01 cmd mode dsi panel"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c index 76b5a6ea610d..64c6f7d45bed 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c @@ -24,7 +24,7 @@ struct s6e3ha8 { struct regulator_bulk_data *supplies; }; -const struct regulator_bulk_data s6e3ha8_supplies[] = { +static const struct regulator_bulk_data s6e3ha8_supplies[] = { { .supply = "vdd3" }, { .supply = "vci" }, { .supply = "vddr" }, diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c new file mode 100644 index 000000000000..e92e95158d1f --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c @@ -0,0 +1,766 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Samsung AMS427AP24 panel with S6E88A0 controller + * Copyright (c) 2024 Jakob Hauser <jahau@rocketmail.com> + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> 
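+/*
+ * Brightness handling in this driver is table-driven: the backlight level
+ * (0..255) is first collapsed onto one of NUM_STEPS_CANDELA luminance
+ * steps, and each step then selects an AID payload (the AOR duty, i.e.
+ * how long the pixels actually emit per frame), an ELVSS level and a
+ * gamma curve, which are written to the panel together on every update.
+ */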
+#include <linux/module.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +#define NUM_STEPS_CANDELA 54 +#define NUM_STEPS_AID 39 +#define NUM_STEPS_ELVSS 17 + +/* length of the payload data, thereof fixed and variable */ +#define FIX_LEN_AID 4 +#define FIX_LEN_ELVSS 2 +#define FIX_LEN_GAMMA 1 +#define VAR_LEN_AID 2 +#define VAR_LEN_ELVSS 1 +#define VAR_LEN_GAMMA 33 +#define LEN_AID (FIX_LEN_AID + VAR_LEN_AID) +#define LEN_ELVSS (FIX_LEN_ELVSS + VAR_LEN_ELVSS) +#define LEN_GAMMA (FIX_LEN_GAMMA + VAR_LEN_GAMMA) + +struct s6e88a0_ams427ap24 { + struct drm_panel panel; + struct backlight_device *bl_dev; + struct mipi_dsi_device *dsi; + struct regulator_bulk_data *supplies; + struct gpio_desc *reset_gpio; + bool flip_horizontal; +}; + +static const struct regulator_bulk_data s6e88a0_ams427ap24_supplies[] = { + { .supply = "vdd3" }, + { .supply = "vci" }, +}; + +static inline +struct s6e88a0_ams427ap24 *to_s6e88a0_ams427ap24(struct drm_panel *panel) +{ + return container_of(panel, struct s6e88a0_ams427ap24, panel); +} + +enum candela { + CANDELA_10CD, /* 0 */ + CANDELA_11CD, + CANDELA_12CD, + CANDELA_13CD, + CANDELA_14CD, + CANDELA_15CD, + CANDELA_16CD, + CANDELA_17CD, + CANDELA_19CD, + CANDELA_20CD, + CANDELA_21CD, + CANDELA_22CD, + CANDELA_24CD, + CANDELA_25CD, + CANDELA_27CD, + CANDELA_29CD, + CANDELA_30CD, + CANDELA_32CD, + CANDELA_34CD, + CANDELA_37CD, + CANDELA_39CD, + CANDELA_41CD, + CANDELA_44CD, + CANDELA_47CD, + CANDELA_50CD, + CANDELA_53CD, + CANDELA_56CD, + CANDELA_60CD, + CANDELA_64CD, + CANDELA_68CD, + CANDELA_72CD, + CANDELA_77CD, + CANDELA_82CD, + CANDELA_87CD, + CANDELA_93CD, + CANDELA_98CD, + CANDELA_105CD, + CANDELA_111CD, + CANDELA_119CD, + CANDELA_126CD, + CANDELA_134CD, + CANDELA_143CD, + CANDELA_152CD, + CANDELA_162CD, + CANDELA_172CD, + CANDELA_183CD, + CANDELA_195CD, + CANDELA_207CD, + CANDELA_220CD, + CANDELA_234CD, + CANDELA_249CD, + CANDELA_265CD, + CANDELA_282CD, + CANDELA_300CD, /* 53 */ +}; + +static const int s6e88a0_ams427ap24_br_to_cd[NUM_STEPS_CANDELA] = { + /* columns: brightness from, brightness till, candela */ + /* 0 */ 10, /* 10CD */ + /* 11 */ 11, /* 11CD */ + /* 12 */ 12, /* 12CD */ + /* 13 */ 13, /* 13CD */ + /* 14 */ 14, /* 14CD */ + /* 15 */ 15, /* 15CD */ + /* 16 */ 16, /* 16CD */ + /* 17 */ 17, /* 17CD */ + /* 18 */ 18, /* 19CD */ + /* 19 */ 19, /* 20CD */ + /* 20 */ 20, /* 21CD */ + /* 21 */ 21, /* 22CD */ + /* 22 */ 22, /* 24CD */ + /* 23 */ 23, /* 25CD */ + /* 24 */ 24, /* 27CD */ + /* 25 */ 25, /* 29CD */ + /* 26 */ 26, /* 30CD */ + /* 27 */ 27, /* 32CD */ + /* 28 */ 28, /* 34CD */ + /* 29 */ 29, /* 37CD */ + /* 30 */ 30, /* 39CD */ + /* 31 */ 32, /* 41CD */ + /* 33 */ 34, /* 44CD */ + /* 35 */ 36, /* 47CD */ + /* 37 */ 38, /* 50CD */ + /* 39 */ 40, /* 53CD */ + /* 41 */ 43, /* 56CD */ + /* 44 */ 46, /* 60CD */ + /* 47 */ 49, /* 64CD */ + /* 50 */ 52, /* 68CD */ + /* 53 */ 56, /* 72CD */ + /* 57 */ 59, /* 77CD */ + /* 60 */ 63, /* 82CD */ + /* 64 */ 67, /* 87CD */ + /* 68 */ 71, /* 93CD */ + /* 72 */ 76, /* 98CD */ + /* 77 */ 80, /* 105CD */ + /* 81 */ 86, /* 111CD */ + /* 87 */ 91, /* 119CD */ + /* 92 */ 97, /* 126CD */ + /* 98 */ 104, /* 134CD */ + /* 105 */ 110, /* 143CD */ + /* 111 */ 118, /* 152CD */ + /* 119 */ 125, /* 162CD */ + /* 126 */ 133, /* 172CD */ + /* 134 */ 142, /* 183CD */ + /* 143 */ 150, /* 195CD */ + /* 151 */ 160, /* 207CD */ + /* 161 */ 170, /* 220CD */ + /* 171 */ 181, 
/* 234CD */ + /* 182 */ 205, /* 249CD */ + /* 206 */ 234, /* 265CD */ + /* 235 */ 254, /* 282CD */ + /* 255 */ 255, /* 300CD */ +}; + +static const u8 s6e88a0_ams427ap24_aid[NUM_STEPS_AID][VAR_LEN_AID] = { + { 0x03, 0x77 }, /* AOR 90.9%, 10CD */ + { 0x03, 0x73 }, /* AOR 90.5%, 11CD */ + { 0x03, 0x69 }, /* AOR 89.4%, 12CD */ + { 0x03, 0x65 }, /* AOR 89.0%, 13CD */ + { 0x03, 0x61 }, /* AOR 88.6%, 14CD */ + { 0x03, 0x55 }, /* AOR 87.4%, 15CD */ + { 0x03, 0x50 }, /* AOR 86.9%, 16CD */ + { 0x03, 0x45 }, /* AOR 85.8%, 17CD */ + { 0x03, 0x35 }, /* AOR 84.1%, 19CD */ + { 0x03, 0x27 }, /* AOR 82.7%, 20CD */ + { 0x03, 0x23 }, /* AOR 82.3%, 21CD */ + { 0x03, 0x17 }, /* AOR 81.0%, 22CD */ + { 0x03, 0x11 }, /* AOR 80.4%, 24CD */ + { 0x03, 0x04 }, /* AOR 79.1%, 25CD */ + { 0x02, 0xf4 }, /* AOR 77.5%, 27CD */ + { 0x02, 0xe3 }, /* AOR 75.7%, 29CD */ + { 0x02, 0xd7 }, /* AOR 74.5%, 30CD */ + { 0x02, 0xc6 }, /* AOR 72.7%, 32CD */ + { 0x02, 0xb7 }, /* AOR 71.2%, 34CD */ + { 0x02, 0xa1 }, /* AOR 69.0%, 37CD */ + { 0x02, 0x91 }, /* AOR 67.3%, 39CD */ + { 0x02, 0x78 }, /* AOR 64.8%, 41CD */ + { 0x02, 0x62 }, /* AOR 62.5%, 44CD */ + { 0x02, 0x45 }, /* AOR 59.5%, 47CD */ + { 0x02, 0x30 }, /* AOR 57.4%, 50CD */ + { 0x02, 0x13 }, /* AOR 54.4%, 53CD */ + { 0x01, 0xf5 }, /* AOR 51.3%, 56CD */ + { 0x01, 0xd3 }, /* AOR 47.8%, 60CD */ + { 0x01, 0xb1 }, /* AOR 44.4%, 64CD */ + { 0x01, 0x87 }, /* AOR 40.1%, 68CD */ + { 0x01, 0x63 }, /* AOR 36.6%, 72CD */ + { 0x01, 0x35 }, /* AOR 31.7%, 77CD */ + { 0x01, 0x05 }, /* AOR 26.9%, 82CD */ + { 0x00, 0xd5 }, /* AOR 21.8%, 87CD */ + { 0x00, 0xa1 }, /* AOR 16.5%, 93CD */ + { 0x00, 0x6f }, /* AOR 11.4%, 98CD */ + { 0x00, 0x31 }, /* AOR 5.0%, 105CD */ + { 0x01, 0x86 }, /* AOR 40.0%, 111CD ~ 172CD */ + { 0x00, 0x08 }, /* AOR 0.6%, 183CD ~ 300CD */ +}; + +static const u8 s6e88a0_ams427ap24_elvss[NUM_STEPS_ELVSS][VAR_LEN_ELVSS] = { + { 0x14 }, /* 10CD ~ 111CD */ + { 0x13 }, /* 119CD */ + { 0x12 }, /* 126CD */ + { 0x12 }, /* 134CD */ + { 0x11 }, /* 143CD */ + { 0x10 }, /* 152CD */ + { 0x0f }, /* 162CD */ + { 0x0e }, /* 172CD */ + { 0x11 }, /* 183CD */ + { 0x11 }, /* 195CD */ + { 0x10 }, /* 207CD */ + { 0x0f }, /* 220CD */ + { 0x0f }, /* 234CD */ + { 0x0e }, /* 249CD */ + { 0x0d }, /* 265CD */ + { 0x0c }, /* 282CD */ + { 0x0b }, /* 300CD */ +}; + +static const u8 s6e88a0_ams427ap24_gamma[NUM_STEPS_CANDELA][VAR_LEN_GAMMA] = { + /* 10CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8c, 0x8b, + 0x8c, 0x87, 0x89, 0x89, 0x88, 0x87, 0x8c, 0x80, 0x82, 0x88, 0x7b, + 0x72, 0x8c, 0x60, 0x68, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 11CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8c, 0x8b, + 0x8c, 0x87, 0x89, 0x89, 0x88, 0x87, 0x8c, 0x80, 0x82, 0x88, 0x7b, + 0x72, 0x8c, 0x60, 0x68, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 12CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8b, 0x8b, + 0x8c, 0x88, 0x89, 0x8a, 0x88, 0x87, 0x8c, 0x81, 0x82, 0x87, 0x7a, + 0x72, 0x8b, 0x60, 0x68, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 13CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8b, 0x8b, + 0x8c, 0x88, 0x89, 0x8a, 0x88, 0x87, 0x8c, 0x81, 0x82, 0x87, 0x7a, + 0x72, 0x8b, 0x61, 0x69, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 14CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b, + 0x8c, 0x88, 0x89, 0x8a, 0x87, 0x86, 0x8a, 0x82, 0x82, 0x87, 0x79, + 0x71, 0x89, 0x63, 0x6c, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 15CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8c, 
0x8c, + 0x8c, 0x86, 0x87, 0x88, 0x85, 0x85, 0x8a, 0x83, 0x83, 0x88, 0x78, + 0x72, 0x89, 0x64, 0x6c, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 16CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b, + 0x8c, 0x86, 0x88, 0x88, 0x86, 0x86, 0x8a, 0x84, 0x84, 0x88, 0x78, + 0x72, 0x89, 0x5d, 0x67, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 17CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x87, 0x89, 0x89, 0x86, 0x86, 0x8a, 0x84, 0x83, 0x87, 0x78, + 0x73, 0x89, 0x64, 0x6e, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 19CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x87, 0x89, 0x89, 0x86, 0x86, 0x89, 0x84, 0x84, 0x87, 0x77, + 0x72, 0x88, 0x65, 0x6f, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 20CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x88, 0x89, 0x89, 0x85, 0x85, 0x88, 0x82, 0x83, 0x85, 0x79, + 0x73, 0x88, 0x65, 0x6f, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 21CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x88, 0x89, 0x89, 0x85, 0x85, 0x88, 0x82, 0x83, 0x85, 0x79, + 0x74, 0x88, 0x65, 0x6f, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 22CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b, + 0x8c, 0x86, 0x88, 0x87, 0x86, 0x86, 0x89, 0x82, 0x83, 0x85, 0x7c, + 0x75, 0x87, 0x65, 0x6f, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 24CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b, + 0x8c, 0x86, 0x88, 0x87, 0x86, 0x86, 0x89, 0x82, 0x83, 0x85, 0x7c, + 0x76, 0x86, 0x66, 0x6f, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 25CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x87, 0x87, 0x89, 0x82, 0x82, 0x84, 0x7f, + 0x7a, 0x89, 0x6b, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 27CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x87, 0x87, 0x89, 0x82, 0x82, 0x84, 0x7f, + 0x7a, 0x89, 0x6b, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 29CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x84, 0x85, 0x86, 0x80, + 0x7b, 0x88, 0x6a, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 30CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x84, 0x85, 0x86, 0x80, + 0x7b, 0x88, 0x6a, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 32CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x84, 0x85, 0x86, 0x80, + 0x7b, 0x88, 0x6a, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 34CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x83, 0x84, 0x84, 0x7f, + 0x79, 0x86, 0x6c, 0x76, 0x91, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 37CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x88, 0x87, 0x86, 0x87, 0x83, 0x84, 0x84, 0x7f, + 0x79, 0x86, 0x6c, 0x76, 0x90, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 39CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x84, 0x84, 0x86, 0x83, 0x85, 0x85, 0x80, + 0x79, 0x85, 0x6c, 0x76, 0x90, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 41CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 
0x86, 0x88, 0x87, 0x84, 0x84, 0x86, 0x81, 0x84, 0x83, 0x7f, + 0x79, 0x84, 0x6e, 0x79, 0x93, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 44CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x84, 0x84, 0x86, 0x81, 0x84, 0x83, 0x7f, + 0x79, 0x84, 0x6e, 0x79, 0x92, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 47CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x84, 0x85, 0x86, 0x81, 0x84, 0x83, 0x7f, + 0x79, 0x83, 0x6f, 0x79, 0x91, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 50CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x84, 0x85, 0x86, 0x82, 0x84, 0x83, 0x7f, + 0x79, 0x83, 0x6f, 0x79, 0x90, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 53CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x83, 0x83, 0x85, 0x84, 0x85, 0x85, 0x7f, + 0x79, 0x83, 0x70, 0x79, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 56CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x87, 0x89, 0x87, 0x83, 0x83, 0x85, 0x84, 0x85, 0x84, 0x7f, + 0x79, 0x82, 0x70, 0x7a, 0x8e, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 60CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x87, 0x89, 0x87, 0x83, 0x83, 0x85, 0x84, 0x85, 0x84, 0x7e, + 0x79, 0x82, 0x71, 0x7a, 0x8d, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 64CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x86, 0x88, 0x86, 0x84, 0x84, 0x86, 0x82, 0x83, 0x82, 0x80, + 0x7a, 0x84, 0x71, 0x7a, 0x8c, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 68CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x86, 0x88, 0x86, 0x84, 0x84, 0x86, 0x82, 0x84, 0x82, 0x81, + 0x7b, 0x83, 0x72, 0x7b, 0x8b, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 72CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x86, 0x88, 0x86, 0x85, 0x85, 0x86, 0x82, 0x84, 0x82, 0x81, + 0x7b, 0x83, 0x72, 0x7c, 0x8a, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 77CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x85, 0x87, 0x85, 0x85, 0x87, 0x87, 0x82, 0x84, 0x82, 0x81, + 0x7c, 0x82, 0x72, 0x7c, 0x89, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 82CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x85, 0x87, 0x85, 0x85, 0x87, 0x87, 0x82, 0x84, 0x82, 0x81, + 0x7c, 0x82, 0x73, 0x7c, 0x88, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 87CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x85, 0x87, 0x85, 0x84, 0x84, 0x86, 0x80, 0x84, 0x81, 0x80, + 0x7a, 0x82, 0x76, 0x7f, 0x89, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 93CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x86, 0x87, 0x85, 0x84, 0x85, 0x86, 0x80, 0x84, 0x80, 0x80, + 0x7a, 0x82, 0x76, 0x80, 0x88, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 98CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x86, 0x87, 0x85, 0x85, 0x85, 0x86, 0x80, 0x84, 0x80, 0x80, + 0x7a, 0x82, 0x76, 0x80, 0x88, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 105CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x89, 0x88, 0x88, 0x8b, 0x8a, + 0x8a, 0x84, 0x87, 0x85, 0x85, 0x85, 0x85, 0x80, 0x84, 0x80, 0x7f, + 0x79, 0x81, 0x71, 0x7d, 0x87, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 111CD */ + { 0x00, 0xdf, 0x00, 0xde, 0x00, 0xde, 0x85, 0x85, 0x84, 0x87, 0x86, + 0x87, 0x85, 0x86, 
0x85, 0x83, 0x83, 0x83, 0x81, 0x82, 0x82, 0x80, + 0x7d, 0x82, 0x75, 0x7f, 0x86, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 119CD */ + { 0x00, 0xe3, 0x00, 0xe1, 0x00, 0xe2, 0x85, 0x85, 0x84, 0x86, 0x85, + 0x85, 0x84, 0x85, 0x84, 0x83, 0x83, 0x83, 0x82, 0x82, 0x82, 0x7e, + 0x7b, 0x81, 0x75, 0x7f, 0x86, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 126CD */ + { 0x00, 0xe6, 0x00, 0xe5, 0x00, 0xe5, 0x85, 0x84, 0x84, 0x85, 0x85, + 0x85, 0x84, 0x84, 0x84, 0x82, 0x83, 0x83, 0x80, 0x81, 0x81, 0x80, + 0x7f, 0x83, 0x73, 0x7c, 0x84, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 134CD */ + { 0x00, 0xe9, 0x00, 0xe8, 0x00, 0xe8, 0x84, 0x84, 0x83, 0x85, 0x85, + 0x85, 0x84, 0x84, 0x83, 0x81, 0x82, 0x82, 0x81, 0x81, 0x81, 0x7f, + 0x7d, 0x81, 0x73, 0x7c, 0x83, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 143CD */ + { 0x00, 0xed, 0x00, 0xec, 0x00, 0xec, 0x84, 0x83, 0x83, 0x84, 0x84, + 0x84, 0x84, 0x84, 0x83, 0x82, 0x83, 0x83, 0x81, 0x80, 0x81, 0x7f, + 0x7e, 0x81, 0x70, 0x79, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 152CD */ + { 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x83, 0x83, 0x83, 0x83, 0x83, + 0x83, 0x84, 0x84, 0x83, 0x81, 0x81, 0x81, 0x80, 0x80, 0x81, 0x80, + 0x80, 0x82, 0x6f, 0x78, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 162CD */ + { 0x00, 0xf4, 0x00, 0xf3, 0x00, 0xf4, 0x83, 0x83, 0x83, 0x83, 0x83, + 0x83, 0x82, 0x81, 0x81, 0x81, 0x81, 0x81, 0x80, 0x80, 0x81, 0x80, + 0x7f, 0x82, 0x6f, 0x78, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 172CD */ + { 0x00, 0xf8, 0x00, 0xf8, 0x00, 0xf8, 0x82, 0x82, 0x82, 0x82, 0x82, + 0x82, 0x82, 0x81, 0x81, 0x80, 0x81, 0x80, 0x80, 0x80, 0x81, 0x81, + 0x80, 0x83, 0x6d, 0x76, 0x7d, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 183CD */ + { 0x00, 0xe0, 0x00, 0xdf, 0x00, 0xdf, 0x84, 0x84, 0x83, 0x86, 0x86, + 0x86, 0x83, 0x84, 0x83, 0x82, 0x82, 0x82, 0x81, 0x83, 0x81, 0x81, + 0x7e, 0x81, 0x80, 0x82, 0x84, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 195CD */ + { 0x00, 0xe4, 0x00, 0xe3, 0x00, 0xe3, 0x84, 0x83, 0x83, 0x85, 0x85, + 0x85, 0x83, 0x84, 0x83, 0x81, 0x82, 0x82, 0x82, 0x83, 0x81, 0x81, + 0x80, 0x82, 0x7d, 0x7f, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 207CD */ + { 0x00, 0xe7, 0x00, 0xe6, 0x00, 0xe6, 0x83, 0x82, 0x82, 0x85, 0x85, + 0x85, 0x82, 0x83, 0x83, 0x82, 0x82, 0x82, 0x80, 0x81, 0x80, 0x81, + 0x80, 0x82, 0x7d, 0x7f, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 220CD */ + { 0x00, 0xeb, 0x00, 0xea, 0x00, 0xea, 0x83, 0x83, 0x82, 0x84, 0x84, + 0x84, 0x82, 0x83, 0x82, 0x81, 0x81, 0x82, 0x81, 0x82, 0x81, 0x80, + 0x7e, 0x80, 0x7d, 0x7f, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 234CD */ + { 0x00, 0xef, 0x00, 0xee, 0x00, 0xee, 0x83, 0x82, 0x82, 0x83, 0x83, + 0x83, 0x82, 0x82, 0x82, 0x81, 0x81, 0x81, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x81, 0x7b, 0x7c, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 249CD */ + { 0x00, 0xf3, 0x00, 0xf2, 0x00, 0xf2, 0x82, 0x81, 0x81, 0x83, 0x83, + 0x83, 0x82, 0x82, 0x82, 0x81, 0x81, 0x81, 0x80, 0x81, 0x80, 0x7f, + 0x7e, 0x7f, 0x7b, 0x7c, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 265CD */ + { 0x00, 0xf7, 0x00, 0xf7, 0x00, 0xf7, 0x81, 0x81, 0x80, 0x82, 0x82, + 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x81, 0x80, 0x7f, + 0x7e, 0x7f, 0x7b, 0x7c, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 282CD */ + { 0x00, 0xfb, 0x00, 0xfb, 0x00, 0xfb, 0x80, 0x80, 0x80, 0x81, 0x81, + 0x81, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x78, 0x79, 0x7d, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 300CD */ + { 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00 }, +}; + +static int s6e88a0_ams427ap24_set_brightness(struct backlight_device *bd) +{ + struct s6e88a0_ams427ap24 *ctx = bl_get_data(bd); + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + struct device *dev = &dsi->dev; + int brightness = bd->props.brightness; + int candela_enum; + u8 b2[LEN_AID] = { 0xb2, 0x40, 0x08, 0x20, 0x00, 0x00 }; + u8 b6[LEN_ELVSS] = { 0xb6, 0x28, 0x00 }; + u8 ca[LEN_GAMMA]; + + /* get candela enum from brightness */ + for (candela_enum = 0; candela_enum < NUM_STEPS_CANDELA; candela_enum++) + if (brightness <= s6e88a0_ams427ap24_br_to_cd[candela_enum]) + break; + + /* get aid */ + switch (candela_enum) { + case CANDELA_10CD ... CANDELA_105CD: + memcpy(&b2[FIX_LEN_AID], + s6e88a0_ams427ap24_aid[candela_enum], + VAR_LEN_AID); + break; + case CANDELA_111CD ... CANDELA_172CD: + memcpy(&b2[FIX_LEN_AID], + s6e88a0_ams427ap24_aid[CANDELA_111CD], + VAR_LEN_AID); + break; + case CANDELA_183CD ... CANDELA_300CD: + memcpy(&b2[FIX_LEN_AID], + s6e88a0_ams427ap24_aid[CANDELA_111CD + 1], + VAR_LEN_AID); + break; + default: + dev_err(dev, "Failed to get aid data\n"); + return -EINVAL; + } + + /* get elvss */ + if (candela_enum <= CANDELA_111CD) { + memcpy(&b6[FIX_LEN_ELVSS], + s6e88a0_ams427ap24_elvss[0], + VAR_LEN_ELVSS); + } else { + memcpy(&b6[FIX_LEN_ELVSS], + s6e88a0_ams427ap24_elvss[candela_enum - CANDELA_111CD], + VAR_LEN_ELVSS); + } + + /* get gamma */ + ca[0] = 0xca; + memcpy(&ca[FIX_LEN_GAMMA], + s6e88a0_ams427ap24_gamma[candela_enum], + VAR_LEN_GAMMA); + + /* write data */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); // level 1 key on + mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, b2, ARRAY_SIZE(b2)); // set aid + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x00); // acl off + mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, b6, ARRAY_SIZE(b6)); // set elvss + mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, ca, ARRAY_SIZE(ca)); // set gamma + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf7, 0x03); // gamma update + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); // level 1 key off + + return dsi_ctx.accum_err; +} + +static void s6e88a0_ams427ap24_reset(struct s6e88a0_ams427ap24 *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(18000, 19000); +} + +static int s6e88a0_ams427ap24_on(struct s6e88a0_ams427ap24 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); // level 1 key on + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfc, 0x5a, 0x5a); // level 2 key on + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x11); // src latch set global 1 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfd, 0x11); // src latch set 1 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x13); // src latch set global 2 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfd, 0x18); // src latch set 2 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x02); // avdd set 1 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x30); // avdd set 2 + + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0x5a, 0x5a); 
// level 3 key on + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcc, 0x4c); // pixel clock divider pol. + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf2, 0x03, 0x0d); // unknown + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0xa5, 0xa5); // level 3 key off + + if (ctx->flip_horizontal) + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcb, 0x0e); // flip display + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); // level 1 key off + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfc, 0xa5, 0xa5); // level 2 key off + + ret = s6e88a0_ams427ap24_set_brightness(ctx->bl_dev); + if (ret < 0) { + dev_err(dev, "Failed to set brightness: %d\n", ret); + return ret; + } + + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + + return dsi_ctx.accum_err; +} + +static int s6e88a0_ams427ap24_off(struct s6e88a0_ams427ap24 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); + + return dsi_ctx.accum_err; +} + +static int s6e88a0_ams427ap24_prepare(struct drm_panel *panel) +{ + struct s6e88a0_ams427ap24 *ctx = to_s6e88a0_ams427ap24(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(s6e88a0_ams427ap24_supplies), + ctx->supplies); + if (ret < 0) { + dev_err(dev, "Failed to enable regulators: %d\n", ret); + return ret; + } + + s6e88a0_ams427ap24_reset(ctx); + + ret = s6e88a0_ams427ap24_on(ctx); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(s6e88a0_ams427ap24_supplies), + ctx->supplies); + return ret; + } + + return 0; +} + +static int s6e88a0_ams427ap24_unprepare(struct drm_panel *panel) +{ + struct s6e88a0_ams427ap24 *ctx = to_s6e88a0_ams427ap24(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + ret = s6e88a0_ams427ap24_off(ctx); + if (ret < 0) + dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(s6e88a0_ams427ap24_supplies), + ctx->supplies); + + return 0; +} + +static const struct drm_display_mode s6e88a0_ams427ap24_mode = { + .clock = (540 + 94 + 4 + 18) * (960 + 12 + 1 + 3) * 60 / 1000, + .hdisplay = 540, + .hsync_start = 540 + 94, + .hsync_end = 540 + 94 + 4, + .htotal = 540 + 94 + 4 + 18, + .vdisplay = 960, + .vsync_start = 960 + 12, + .vsync_end = 960 + 12 + 1, + .vtotal = 960 + 12 + 1 + 3, + .width_mm = 55, + .height_mm = 95, + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, +}; + +static int s6e88a0_ams427ap24_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, + &s6e88a0_ams427ap24_mode); +} + +static const struct drm_panel_funcs s6e88a0_ams427ap24_panel_funcs = { + .prepare = s6e88a0_ams427ap24_prepare, + .unprepare = s6e88a0_ams427ap24_unprepare, + .get_modes = s6e88a0_ams427ap24_get_modes, +}; + +static const struct backlight_ops s6e88a0_ams427ap24_bl_ops = { + .update_status = s6e88a0_ams427ap24_set_brightness, +}; + +static int s6e88a0_ams427ap24_register_backlight(struct s6e88a0_ams427ap24 *ctx) +{ + struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 180, + .max_brightness = 255, + }; + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret = 0; + + ctx->bl_dev = 
devm_backlight_device_register(dev, dev_name(dev), dev, ctx, + &s6e88a0_ams427ap24_bl_ops, + &props); + if (IS_ERR(ctx->bl_dev)) { + ret = PTR_ERR(ctx->bl_dev); + dev_err(dev, "error registering backlight device (%d)\n", ret); + } + + return ret; +} + +static int s6e88a0_ams427ap24_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct s6e88a0_ams427ap24 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ret = devm_regulator_bulk_get_const(dev, + ARRAY_SIZE(s6e88a0_ams427ap24_supplies), + s6e88a0_ams427ap24_supplies, + &ctx->supplies); + if (ret < 0) + return ret; + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 2; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_NO_HFP; + + drm_panel_init(&ctx->panel, dev, &s6e88a0_ams427ap24_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + ctx->panel.prepare_prev_first = true; + + ctx->flip_horizontal = device_property_read_bool(dev, "flip-horizontal"); + + ret = s6e88a0_ams427ap24_register_backlight(ctx); + if (ret < 0) + return ret; + + drm_panel_add(&ctx->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; +} + +static void s6e88a0_ams427ap24_remove(struct mipi_dsi_device *dsi) +{ + struct s6e88a0_ams427ap24 *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&ctx->panel); +} + +static const struct of_device_id s6e88a0_ams427ap24_of_match[] = { + { .compatible = "samsung,s6e88a0-ams427ap24" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, s6e88a0_ams427ap24_of_match); + +static struct mipi_dsi_driver s6e88a0_ams427ap24_driver = { + .probe = s6e88a0_ams427ap24_probe, + .remove = s6e88a0_ams427ap24_remove, + .driver = { + .name = "panel-s6e88a0-ams427ap24", + .of_match_table = s6e88a0_ams427ap24_of_match, + }, +}; +module_mipi_dsi_driver(s6e88a0_ams427ap24_driver); + +MODULE_AUTHOR("Jakob Hauser <jahau@rocketmail.com>"); +MODULE_DESCRIPTION("Samsung AMS427AP24 panel with S6E88A0 controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c index 8a3fe531c641..7d1b421ea9dd 100644 --- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c +++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c @@ -297,7 +297,7 @@ static struct platform_driver seiko_panel_platform_driver = { .of_match_table = platform_of_match, }, .probe = seiko_panel_platform_probe, - .remove_new = seiko_panel_remove, + .remove = seiko_panel_remove, }; module_platform_driver(seiko_panel_platform_driver); diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c index 76bd9e810827..a9673a52b861 100644 --- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c @@ -207,7 +207,7 @@ MODULE_DEVICE_TABLE(of, ls037v7dw01_of_match); static struct platform_driver ls037v7dw01_driver = { .probe = ls037v7dw01_probe, - .remove_new = ls037v7dw01_remove, + .remove = ls037v7dw01_remove, .driver = 
{ .name = "panel-sharp-ls037v7dw01", .of_match_table = ls037v7dw01_of_match, diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 86735430462f..222c170dde8b 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -4565,6 +4565,31 @@ static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = { .connector_type = DRM_MODE_CONNECTOR_LVDS, }; +static const struct drm_display_mode mchp_ac69t88a_mode = { + .clock = 25000, + .hdisplay = 800, + .hsync_start = 800 + 88, + .hsync_end = 800 + 88 + 5, + .htotal = 800 + 88 + 5 + 40, + .vdisplay = 480, + .vsync_start = 480 + 23, + .vsync_end = 480 + 23 + 5, + .vtotal = 480 + 23 + 5 + 1, +}; + +static const struct panel_desc mchp_ac69t88a = { + .modes = &mchp_ac69t88a_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 108, + .height = 65, + }, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct drm_display_mode arm_rtsm_mode[] = { { .clock = 65000, @@ -5049,6 +5074,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "yes-optoelectronics,ytc700tlag-05-201c", .data = &yes_optoelectronics_ytc700tlag_05_201c, }, { + .compatible = "microchip,ac69t88a", + .data = &mchp_ac69t88a, + }, { /* Must be the last entry */ .compatible = "panel-dpi", .data = &panel_dpi, @@ -5092,7 +5120,7 @@ static struct platform_driver panel_simple_platform_driver = { .pm = &panel_simple_pm_ops, }, .probe = panel_simple_platform_probe, - .remove_new = panel_simple_platform_remove, + .remove = panel_simple_platform_remove, .shutdown = panel_simple_platform_shutdown, }; diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index 2d30da38c2c3..3385fd3ef41a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -38,7 +38,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq, return PTR_ERR(opp); dev_pm_opp_put(opp); - err = dev_pm_opp_set_rate(dev, *freq); + err = dev_pm_opp_set_rate(dev, *freq); if (!err) ptdev->pfdevfreq.current_frequency = *freq; @@ -182,6 +182,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) * if any and will avoid a switch off by regulator_late_cleanup() */ ret = dev_pm_opp_set_opp(dev, opp); + dev_pm_opp_put(opp); if (ret) { DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n"); return ret; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 04d615df5259..ee3864476eb9 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -866,7 +866,7 @@ MODULE_DEVICE_TABLE(of, dt_match); static struct platform_driver panfrost_driver = { .probe = panfrost_probe, - .remove_new = panfrost_remove, + .remove = panfrost_remove, .driver = { .name = "panfrost", .pm = pm_ptr(&panfrost_pm_ops), diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index f19f918e2330..f5abde3866fb 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -177,7 +177,6 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev) struct panfrost_model { const char *name; u32 id; - u32 id_mask; u64 features; u64 issues; struct { diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c index 
9d0f891b9b53..ecc7a52bd688 100644 --- a/drivers/gpu/drm/panthor/panthor_devfreq.c +++ b/drivers/gpu/drm/panthor/panthor_devfreq.c @@ -163,13 +163,6 @@ int panthor_devfreq_init(struct panthor_device *ptdev) cur_freq = clk_get_rate(ptdev->clks.core); - opp = devfreq_recommended_opp(dev, &cur_freq, 0); - if (IS_ERR(opp)) - return PTR_ERR(opp); - - panthor_devfreq_profile.initial_freq = cur_freq; - ptdev->current_frequency = cur_freq; - /* Regulator coupling only takes care of synchronizing/balancing voltage * updates, but the coupled regulator needs to be enabled manually. * @@ -200,18 +193,24 @@ int panthor_devfreq_init(struct panthor_device *ptdev) return ret; } + opp = devfreq_recommended_opp(dev, &cur_freq, 0); + if (IS_ERR(opp)) + return PTR_ERR(opp); + + panthor_devfreq_profile.initial_freq = cur_freq; + ptdev->current_frequency = cur_freq; + /* * Set the recommend OPP this will enable and configure the regulator * if any and will avoid a switch off by regulator_late_cleanup() */ ret = dev_pm_opp_set_opp(dev, opp); + dev_pm_opp_put(opp); if (ret) { DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n"); return ret; } - dev_pm_opp_put(opp); - /* Find the fastest defined rate */ opp = dev_pm_opp_find_freq_floor(dev, &freq); if (IS_ERR(opp)) diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c index 4082c8f2951d..6fbff516c1c1 100644 --- a/drivers/gpu/drm/panthor/panthor_device.c +++ b/drivers/gpu/drm/panthor/panthor_device.c @@ -390,11 +390,15 @@ int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct * { u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT; + if ((vma->vm_flags & VM_SHARED) == 0) + return -EINVAL; + switch (offset) { case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET: if (vma->vm_end - vma->vm_start != PAGE_SIZE || (vma->vm_flags & (VM_WRITE | VM_EXEC))) return -EINVAL; + vm_flags_clear(vma, VM_MAYWRITE); break; diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index ac7e53f6e3f0..0b3fbee3d37a 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -1589,7 +1589,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(panthor_pm_ops, static struct platform_driver panthor_driver = { .probe = panthor_probe, - .remove_new = panthor_remove, + .remove = panthor_remove, .driver = { .name = "panthor", .pm = pm_ptr(&panthor_pm_ops), diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index 631f639b8b86..ecca5565ce41 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -500,6 +500,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev, struct panthor_fw_binary_iter *iter, u32 ehdr) { + ssize_t vm_pgsz = panthor_vm_page_size(ptdev->fw->vm); struct panthor_fw_binary_section_entry_hdr hdr; struct panthor_fw_section *section; u32 section_size; @@ -528,8 +529,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev, return -EINVAL; } - if ((hdr.va.start & ~PAGE_MASK) != 0 || - (hdr.va.end & ~PAGE_MASK) != 0) { + if (!IS_ALIGNED(hdr.va.start, vm_pgsz) || !IS_ALIGNED(hdr.va.end, vm_pgsz)) { drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n", hdr.va.start, hdr.va.end); return -EINVAL; diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c index c60b599665d8..8244a4e6c2a2 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.c +++ b/drivers/gpu/drm/panthor/panthor_gem.c @@ -44,8 +44,7 @@ 
void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo) to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm))) goto out_free_bo; - ret = panthor_vm_unmap_range(vm, bo->va_node.start, - panthor_kernel_bo_size(bo)); + ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size); if (ret) goto out_free_bo; @@ -95,10 +94,16 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm, } bo = to_panthor_bo(&obj->base); - size = obj->base.size; kbo->obj = &obj->base; bo->flags = bo_flags; + /* The system and GPU MMU page size might differ, which becomes a + * problem for FW sections that need to be mapped at explicit address + * since our PAGE_SIZE alignment might cover a VA range that's + * expected to be used for another section. + * Make sure we never map more than we need. + */ + size = ALIGN(size, panthor_vm_page_size(vm)); ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node); if (ret) goto err_put_obj; diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index aa12ed2acfcf..a49132f3778b 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -826,6 +826,14 @@ void panthor_vm_idle(struct panthor_vm *vm) mutex_unlock(&ptdev->mmu->as.slots_lock); } +u32 panthor_vm_page_size(struct panthor_vm *vm) +{ + const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops); + u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1; + + return 1u << pg_shift; +} + static void panthor_vm_stop(struct panthor_vm *vm) { drm_sched_stop(&vm->sched, NULL); @@ -982,6 +990,8 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot, if (!size) break; + + offset = 0; } return panthor_vm_flush_range(vm, start_iova, iova - start_iova); @@ -1025,12 +1035,13 @@ int panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size, struct drm_mm_node *va_node) { + ssize_t vm_pgsz = panthor_vm_page_size(vm); int ret; - if (!size || (size & ~PAGE_MASK)) + if (!size || !IS_ALIGNED(size, vm_pgsz)) return -EINVAL; - if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK)) + if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz)) return -EINVAL; mutex_lock(&vm->mm_lock); @@ -1571,7 +1582,9 @@ panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle) { struct panthor_vm *vm; + xa_lock(&pool->xa); vm = panthor_vm_get(xa_load(&pool->xa, handle)); + xa_unlock(&pool->xa); return vm; } @@ -2366,11 +2379,12 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file, const struct drm_panthor_vm_bind_op *op, struct panthor_vm_op_ctx *op_ctx) { + ssize_t vm_pgsz = panthor_vm_page_size(vm); struct drm_gem_object *gem; int ret; /* Aligned on page size. 
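The panthor hunks above replace hard-coded PAGE_MASK checks with the VM's own page size, which panthor_vm_page_size() derives from the io-pgtable's pgsize_bitmap (ffs() selects the smallest supported page). A standalone sketch of that idiom, not kernel code: __builtin_ffsll() stands in for the kernel's ffs(), IS_ALIGNED() is simplified, and the bitmap value is made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	uint64_t pgsize_bitmap = 0x40201000;	/* 4K, 2M, 1G supported */
	uint32_t vm_pgsz = 1u << (__builtin_ffsll(pgsize_bitmap) - 1);
	uint64_t va = 0x10000000, size = 0x3000;

	/* OR-ing va and size lets one IS_ALIGNED() cover both values:
	 * a low bit set in either operand survives the OR and fails
	 * the test, which is what the vm_bind check right below does.
	 */
	printf("vm page size: %u\n", vm_pgsz);
	printf("va|size aligned: %d\n", (int)IS_ALIGNED(va | size, vm_pgsz));
	return 0;
}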
*/ - if ((op->va | op->size) & ~PAGE_MASK) + if (!IS_ALIGNED(op->va | op->size, vm_pgsz)) return -EINVAL; switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) { diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h index 6788771071e3..8d21e83d8aba 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.h +++ b/drivers/gpu/drm/panthor/panthor_mmu.h @@ -30,6 +30,7 @@ panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset); int panthor_vm_active(struct panthor_vm *vm); void panthor_vm_idle(struct panthor_vm *vm); +u32 panthor_vm_page_size(struct panthor_vm *vm); int panthor_vm_as(struct panthor_vm *vm); int panthor_vm_flush_all(struct panthor_vm *vm); diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index eb9f6635cc12..ef4bec7ff9c7 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -602,10 +602,11 @@ struct panthor_group { * @timedout: True when a timeout occurred on any of the queues owned by * this group. * - * Timeouts can be reported by drm_sched or by the FW. In any case, any - * timeout situation is unrecoverable, and the group becomes useless. - * We simply wait for all references to be dropped so we can release the - * group object. + * Timeouts can be reported by drm_sched or by the FW. If a reset is required, + * and the group can't be suspended, this also leads to a timeout. In any case, + * any timeout situation is unrecoverable, and the group becomes useless. We + * simply wait for all references to be dropped so we can release the group + * object. */ bool timedout; @@ -2687,6 +2688,12 @@ void panthor_sched_suspend(struct panthor_device *ptdev) csgs_upd_ctx_init(&upd_ctx); while (slot_mask) { u32 csg_id = ffs(slot_mask) - 1; + struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; + + /* We consider group suspension failures as fatal and flag the + * group as unusable by setting timedout=true. 
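The panthor_sched.c hunks further down make suspension failures terminal: panthor_sched_suspend() flags the groups it has to terminate with timedout=true, and panthor_job_create() now rejects submissions against such groups. A minimal model of this flag-and-reject pattern; job_create() is a hypothetical stand-in for the real submission path, and the driver's actual group_can_run() checks more conditions than shown here.

#include <errno.h>
#include <stdbool.h>

struct group {
	bool destroyed;
	bool timedout;
};

/* mirrors the spirit of the driver's group_can_run() gate */
static bool group_can_run(const struct group *g)
{
	return !g->destroyed && !g->timedout;
}

/* hypothetical submit path: fail fast instead of queueing dead work */
static int job_create(struct group *g)
{
	if (!group_can_run(g))
		return -EINVAL;
	/* ... build and queue the job ... */
	return 0;
}

int main(void)
{
	struct group g = { .timedout = true };

	/* submission is refused once the flag is set */
	return job_create(&g) == -EINVAL ? 0 : 1;
}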
+ */ + csg_slot->group->timedout = true; csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, CSG_STATE_TERMINATE, @@ -3688,6 +3695,11 @@ panthor_job_create(struct panthor_file *pfile, goto err_put_job; } + if (!group_can_run(job->group)) { + ret = -EINVAL; + goto err_put_job; + } + if (job->queue_idx >= job->group->queue_count || !job->group->queues[job->queue_idx]) { ret = -EINVAL; diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig index 1992df4a82d2..98a148bea628 100644 --- a/drivers/gpu/drm/qxl/Kconfig +++ b/drivers/gpu/drm/qxl/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_QXL tristate "QXL virtual GPU" - depends on DRM && PCI && MMU + depends on DRM && PCI && MMU && HAS_IOPORT select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_TTM diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 4ef1c134d6dd..21f752644242 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -29,12 +29,12 @@ #include "qxl_drv.h" +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/vgaarb.h> #include <drm/drm.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> @@ -92,7 +92,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &qxl_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, qxl_driver.name); if (ret) goto disable_pci; diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index fca8b08535a5..6328627b7c34 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -228,10 +228,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector) { struct drm_device *dev = radeon_connector->base.dev; struct radeon_device *rdev = dev->dev_private; - int ret; radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd; - radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev; radeon_connector->ddc_bus->aux.drm_dev = radeon_connector->base.dev; if (ASIC_IS_DCE5(rdev)) { if (radeon_auxch) @@ -242,11 +240,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector) radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom; } - ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux); - if (!ret) - radeon_connector->ddc_bus->has_aux = true; - - WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret); + drm_dp_aux_init(&radeon_connector->ddc_bus->aux); + radeon_connector->ddc_bus->has_aux = true; } /***** general DP utility functions *****/ diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 1b2d31c4d77c..ac77d1246b94 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -2104,7 +2104,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = radeon_get_ib_value(p, idx+1) << 8; + offset = (u64)radeon_get_ib_value(p, idx+1) << 8; if (offset != track->vgt_strmout_bo_offset[idx_value]) { DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n", offset, track->vgt_strmout_bo_offset[idx_value]); diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index 47aa06a9a942..5b69cc8011b4 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -760,16 +760,20 @@ static int 
radeon_audio_component_get_eld(struct device *kdev, int port, if (!rdev->audio.enabled || !rdev->mode_info.mode_config_initialized) return 0; - list_for_each_entry(encoder, &rdev_to_drm(rdev)->mode_config.encoder_list, head) { + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + const struct drm_connector_helper_funcs *connector_funcs = + connector->helper_private; + encoder = connector_funcs->best_encoder(connector); + + if (!encoder) + continue; + if (!radeon_encoder_is_digital(encoder)) continue; radeon_encoder = to_radeon_encoder(encoder); dig = radeon_encoder->enc_priv; if (!dig->pin || dig->pin->id != port) continue; - connector = radeon_get_connector_for_encoder(encoder); - if (!connector) - continue; *enabled = true; ret = drm_eld_size(connector->eld); memcpy(buf, connector->eld, min(max_bytes, ret)); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 528a8f3677c2..f9996304d943 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1255,16 +1255,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) goto exit; } } - - if (dret && radeon_connector->hpd.hpd != RADEON_HPD_NONE && - !radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && - connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) { - DRM_DEBUG_KMS("EDID is readable when HPD disconnected\n"); - schedule_delayed_work(&rdev->hotplug_work, msecs_to_jiffies(1000)); - ret = connector_status_disconnected; - goto exit; - } - if (dret) { radeon_connector->detected_by_load = false; radeon_connector_free_edid(connector); @@ -1786,6 +1776,20 @@ static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector return MODE_OK; } +static int +radeon_connector_late_register(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + int r = 0; + + if (radeon_connector->ddc_bus->has_aux) { + radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev; + r = drm_dp_aux_register(&radeon_connector->ddc_bus->aux); + } + + return r; +} + static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { .get_modes = radeon_dp_get_modes, .mode_valid = radeon_dp_mode_valid, @@ -1800,6 +1804,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = { .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, + .late_register = radeon_connector_late_register, }; static const struct drm_connector_funcs radeon_edp_connector_funcs = { @@ -1810,6 +1815,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = { .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, + .late_register = radeon_connector_late_register, }; static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = { @@ -1820,6 +1826,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = { .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, + .late_register = radeon_connector_late_register, }; void diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 554b236c2328..6f071e61f764 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -35,6 +35,7 @@ #include <linux/vgaarb.h> #include <drm/drm_cache.h> +#include 
<drm/drm_client_event.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_device.h> #include <drm/drm_file.h> @@ -1542,7 +1543,7 @@ void radeon_device_fini(struct radeon_device *rdev) * Called at driver suspend. */ int radeon_suspend_kms(struct drm_device *dev, bool suspend, - bool fbcon, bool freeze) + bool notify_clients, bool freeze) { struct radeon_device *rdev; struct pci_dev *pdev; @@ -1634,9 +1635,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, pci_set_power_state(pdev, PCI_D3hot); } - if (fbcon) { + if (notify_clients) { console_lock(); - radeon_fbdev_set_suspend(rdev, 1); + drm_client_dev_suspend(dev, true); console_unlock(); } return 0; @@ -1649,7 +1650,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, * Returns 0 for success or an error on failure. * Called at driver resume. */ -int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) +int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients) { struct drm_connector *connector; struct radeon_device *rdev = dev->dev_private; @@ -1660,14 +1661,14 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (fbcon) { + if (notify_clients) { console_lock(); } if (resume) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); if (pci_enable_device(pdev)) { - if (fbcon) + if (notify_clients) console_unlock(); return -1; } @@ -1730,7 +1731,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) /* reset hpd state */ radeon_hpd_init(rdev); /* blat the mode back in */ - if (fbcon) { + if (notify_clients) { drm_helper_resume_force_mode(dev); /* turn on display hw */ drm_modeset_lock_all(dev); @@ -1746,8 +1747,8 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) radeon_pm_compute_clocks(rdev); - if (fbcon) { - radeon_fbdev_set_suspend(rdev, 0); + if (notify_clients) { + drm_client_dev_resume(dev, true); console_unlock(); } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 900b05d8aa5c..5e958cc223f4 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -29,7 +29,7 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ - +#include <linux/aperture.h> #include <linux/compat.h> #include <linux/module.h> #include <linux/pm_runtime.h> @@ -37,7 +37,6 @@ #include <linux/mmu_notifier.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> @@ -249,10 +248,9 @@ int radeon_cik_support = 1; MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)"); module_param_named(cik_support, radeon_cik_support, int, 0444); -static struct pci_device_id pciidlist[] = { +static const struct pci_device_id pciidlist[] = { radeon_PCI_IDS }; - MODULE_DEVICE_TABLE(pci, pciidlist); static const struct drm_driver kms_driver; @@ -300,7 +298,7 @@ static int radeon_pci_probe(struct pci_dev *pdev, return -EPROBE_DEFER; /* Get rid of things like offb */ - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &kms_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, kms_driver.name); if (ret) return ret; diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 0f723292409e..fafed331e0a0 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -43,7 +43,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *clone_encoder; - uint32_t index_mask = 0; + uint32_t index_mask = drm_encoder_mask(encoder); int count; /* DIG routing gets problematic */ diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c index 0aa20c8df546..d4a58bd679db 100644 --- a/drivers/gpu/drm/radeon/radeon_fbdev.c +++ b/drivers/gpu/drm/radeon/radeon_fbdev.c @@ -288,12 +288,6 @@ err_radeon_fbdev_destroy_pinned_object: return ret; } -void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) -{ - if (rdev_to_drm(rdev)->fb_helper) - drm_fb_helper_set_suspend(rdev_to_drm(rdev)->fb_helper, state); -} - bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) { struct drm_fb_helper *fb_helper = rdev_to_drm(rdev)->fb_helper; diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 9735f4968b86..bf2d4b16dc2a 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -44,8 +44,6 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); int radeon_gem_prime_pin(struct drm_gem_object *obj); void radeon_gem_prime_unpin(struct drm_gem_object *obj); -const struct drm_gem_object_funcs radeon_gem_object_funcs; - static vm_fault_t radeon_gem_fault(struct vm_fault *vmf) { struct ttm_buffer_object *bo = vmf->vma->vm_private_data; @@ -132,7 +130,6 @@ retry: return r; } *obj = &robj->tbo.base; - (*obj)->funcs = &radeon_gem_object_funcs; robj->pid = task_pid_nr(current); mutex_lock(&rdev->gem.mutex); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 12a1d99a1815..4063d3801e81 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -942,13 +942,10 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, struct drm_fb_helper_surface_size *sizes); #define RADEON_FBDEV_DRIVER_OPS \ .fbdev_probe = radeon_fbdev_driver_fbdev_probe -void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); 
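One detail worth calling out from the r600_cs.c hunk above: radeon_get_ib_value() returns a 32-bit value, so without the (u64) cast the shift by 8 is performed in 32 bits and the top bits are discarded before the result is widened into the 64-bit offset. A compilable illustration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ib_val = 0x80000001;		/* bit 31 set */
	uint64_t lost = ib_val << 8;		/* 32-bit shift: 0x100 */
	uint64_t kept = (uint64_t)ib_val << 8;	/* widen first: 0x8000000100 */

	printf("without cast: %#llx\n", (unsigned long long)lost);
	printf("with cast:    %#llx\n", (unsigned long long)kept);
	return 0;
}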
#else #define RADEON_FBDEV_DRIVER_OPS \ .fbdev_probe = NULL -static inline void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) -{ } static inline bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) { return false; diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d0e4b43d155c..7672404fdb29 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -151,6 +151,7 @@ int radeon_bo_create(struct radeon_device *rdev, if (bo == NULL) return -ENOMEM; drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size); + bo->tbo.base.funcs = &radeon_gem_object_funcs; bo->rdev = rdev; bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 69d0c12fa419..616d25c8c2de 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -219,8 +219,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, if (old_mem->mem_type == TTM_PL_TT && new_mem->mem_type == TTM_PL_SYSTEM) { radeon_ttm_tt_unbind(bo->bdev, bo->ttm); - ttm_resource_free(bo, &bo->resource); - ttm_bo_assign_mem(bo, new_mem); + ttm_bo_move_null(bo, new_mem); goto out; } if (rdev->ring[radeon_copy_ring_index(rdev)].ready && diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c b/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c index 26a2f5ad8ee5..79b67c406bd6 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c @@ -201,7 +201,7 @@ MODULE_DEVICE_TABLE(of, rcar_cmm_of_table); static struct platform_driver rcar_cmm_platform_driver = { .probe = rcar_cmm_probe, - .remove_new = rcar_cmm_remove, + .remove = rcar_cmm_remove, .driver = { .name = "rcar-cmm", .of_match_table = rcar_cmm_of_table, diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c index 4e0bafc86f50..f9ecc334c024 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c @@ -729,7 +729,7 @@ error: static struct platform_driver rcar_du_platform_driver = { .probe = rcar_du_probe, - .remove_new = rcar_du_remove, + .remove = rcar_du_remove, .shutdown = rcar_du_shutdown, .driver = { .name = "rcar-du", diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c index 119d69d20b23..c0176e5de9a8 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c @@ -108,7 +108,7 @@ MODULE_DEVICE_TABLE(of, rcar_dw_hdmi_of_table); static struct platform_driver rcar_dw_hdmi_platform_driver = { .probe = rcar_dw_hdmi_probe, - .remove_new = rcar_dw_hdmi_remove, + .remove = rcar_dw_hdmi_remove, .driver = { .name = "rcar-dw-hdmi", .of_match_table = rcar_dw_hdmi_of_table, diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c index 92ba43a6fe38..e8d64583e3bd 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c @@ -1018,7 +1018,7 @@ static const struct dev_pm_ops rcar_lvds_pm_ops = { static struct platform_driver rcar_lvds_platform_driver = { .probe = rcar_lvds_probe, - .remove_new = rcar_lvds_remove, + .remove = rcar_lvds_remove, .driver = { .name = "rcar-lvds", .pm = &rcar_lvds_pm_ops, diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c 
index 2dba7c5ffd2c..8180625d5866 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c @@ -1088,7 +1088,7 @@ MODULE_DEVICE_TABLE(of, rcar_mipi_dsi_of_table); static struct platform_driver rcar_mipi_dsi_platform_driver = { .probe = rcar_mipi_dsi_probe, - .remove_new = rcar_mipi_dsi_remove, + .remove = rcar_mipi_dsi_remove, .driver = { .name = "rcar-mipi-dsi", .of_match_table = rcar_mipi_dsi_of_table, diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c index bbd7003335da..b069efd8ffc3 100644 --- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c @@ -173,7 +173,7 @@ error: static struct platform_driver rzg2l_du_platform_driver = { .probe = rzg2l_du_probe, - .remove_new = rzg2l_du_remove, + .remove = rzg2l_du_remove, .shutdown = rzg2l_du_shutdown, .driver = { .name = "rzg2l-du", diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c index 10febea473cd..fa7a1ae22aa3 100644 --- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c @@ -798,7 +798,7 @@ MODULE_DEVICE_TABLE(of, rzg2l_mipi_dsi_of_table); static struct platform_driver rzg2l_mipi_dsi_platform_driver = { .probe = rzg2l_mipi_dsi_probe, - .remove_new = rzg2l_mipi_dsi_remove, + .remove = rzg2l_mipi_dsi_remove, .driver = { .name = "rzg2l-mipi-dsi", .pm = &rzg2l_mipi_pm_ops, diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c index 8d3effe3f598..76ee3e16077c 100644 --- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c @@ -282,7 +282,7 @@ static const struct of_device_id shmob_drm_of_table[] __maybe_unused = { static struct platform_driver shmob_drm_platform_driver = { .probe = shmob_drm_probe, - .remove_new = shmob_drm_remove, + .remove = shmob_drm_remove, .shutdown = shmob_drm_shutdown, .driver = { .name = "shmob-drm", diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 258dd38e1a8e..3ac579615749 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -9,6 +9,7 @@ config DRM_ROCKCHIP select VIDEOMODE_HELPERS select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP select DRM_DW_HDMI if ROCKCHIP_DW_HDMI + select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI select GENERIC_PHY_MIPI_DPHY if ROCKCHIP_DW_MIPI_DSI @@ -64,6 +65,14 @@ config ROCKCHIP_DW_HDMI enable HDMI on RK3288 or RK3399 based SoC, you should select this option. +config ROCKCHIP_DW_HDMI_QP + bool "Rockchip specific extensions for Synopsys DW HDMI QP" + select DRM_BRIDGE_CONNECTOR + help + This selects support for Rockchip SoC specific extensions + for the Synopsys DesignWare HDMI QP driver. If you want to + enable HDMI on RK3588 based SoC, you should select this option. 
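The dw_hdmi_qp-rockchip.c driver added a few hunks below programs the RK3588 GRF exclusively through the usual Rockchip write-mask idiom: the upper 16 bits of the written word select which of the lower 16 bits the register latches, so fields change without a read-modify-write cycle. A self-contained sketch using the macro and register names from that driver; grf_write() is a hypothetical stand-in for regmap_write().

#include <stdint.h>

#define BIT(n)				(1u << (n))
#define HIWORD_UPDATE(val, mask)	((val) | (mask) << 16)

#define RK3588_GRF_VO1_CON3	0x000c
#define RK3588_SCLIN_MASK	BIT(9)
#define RK3588_MODE_MASK	BIT(11)

static void grf_write(uint32_t reg, uint32_t val)
{
	(void)reg;
	(void)val;	/* regmap_write() stand-in */
}

void example(void)
{
	/* Set MODE and clear SCLIN in one posted write; every other
	 * bit keeps its value because its mask bit stays 0.
	 */
	grf_write(RK3588_GRF_VO1_CON3,
		  HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
		  HIWORD_UPDATE(0, RK3588_SCLIN_MASK));
}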
+ config ROCKCHIP_DW_MIPI_DSI bool "Rockchip specific extensions for Synopsys DW MIPI DSI" select GENERIC_PHY_MIPI_DPHY diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile index 3ff7b21c0414..3eab662a5a1d 100644 --- a/drivers/gpu/drm/rockchip/Makefile +++ b/drivers/gpu/drm/rockchip/Makefile @@ -11,6 +11,7 @@ rockchipdrm-$(CONFIG_ROCKCHIP_VOP) += rockchip_drm_vop.o rockchip_vop_reg.o rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o +rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI_QP) += dw_hdmi_qp-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index d3341edfe4f4..546d13f19f9b 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -470,7 +470,7 @@ MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids); struct platform_driver rockchip_dp_driver = { .probe = rockchip_dp_probe, - .remove_new = rockchip_dp_remove, + .remove = rockchip_dp_remove, .driver = { .name = "rockchip-dp", .pm = pm_ptr(&rockchip_dp_pm_ops), diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index b04538907f95..ff9d95e2c4d4 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -1254,7 +1254,7 @@ static const struct dev_pm_ops cdn_dp_pm_ops = { struct platform_driver cdn_dp_driver = { .probe = cdn_dp_probe, - .remove_new = cdn_dp_remove, + .remove = cdn_dp_remove, .shutdown = cdn_dp_shutdown, .driver = { .name = "cdn-dp", diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index 58a44af0e9ad..1b64b6e39cc8 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -1709,7 +1709,7 @@ MODULE_DEVICE_TABLE(of, dw_mipi_dsi_rockchip_dt_ids); struct platform_driver dw_mipi_dsi_rockchip_driver = { .probe = dw_mipi_dsi_rockchip_probe, - .remove_new = dw_mipi_dsi_rockchip_remove, + .remove = dw_mipi_dsi_rockchip_remove, .driver = { .of_match_table = dw_mipi_dsi_rockchip_dt_ids, .pm = &dw_mipi_dsi_rockchip_pm_ops, diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 96e1097f993d..42bda4ffbbbd 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -666,7 +666,7 @@ static const struct dev_pm_ops dw_hdmi_rockchip_pm = { struct platform_driver dw_hdmi_rockchip_pltfm_driver = { .probe = dw_hdmi_rockchip_probe, - .remove_new = dw_hdmi_rockchip_remove, + .remove = dw_hdmi_rockchip_remove, .driver = { .name = "dwhdmi-rockchip", .pm = &dw_hdmi_rockchip_pm, diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c new file mode 100644 index 000000000000..c8b362cc2b95 --- /dev/null +++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2021-2022 Rockchip Electronics Co., Ltd. + * Copyright (c) 2024 Collabora Ltd. 
+ * + * Author: Algea Cao <algea.cao@rock-chips.com> + * Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com> + */ + +#include <linux/clk.h> +#include <linux/gpio/consumer.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/phy/phy.h> +#include <linux/regmap.h> +#include <linux/workqueue.h> + +#include <drm/bridge/dw_hdmi_qp.h> +#include <drm/display/drm_hdmi_helper.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#include "rockchip_drm_drv.h" + +#define RK3588_GRF_SOC_CON2 0x0308 +#define RK3588_HDMI0_HPD_INT_MSK BIT(13) +#define RK3588_HDMI0_HPD_INT_CLR BIT(12) +#define RK3588_GRF_SOC_CON7 0x031c +#define RK3588_SET_HPD_PATH_MASK GENMASK(13, 12) +#define RK3588_GRF_SOC_STATUS1 0x0384 +#define RK3588_HDMI0_LEVEL_INT BIT(16) +#define RK3588_GRF_VO1_CON3 0x000c +#define RK3588_SCLIN_MASK BIT(9) +#define RK3588_SDAIN_MASK BIT(10) +#define RK3588_MODE_MASK BIT(11) +#define RK3588_I2S_SEL_MASK BIT(13) +#define RK3588_GRF_VO1_CON9 0x0024 +#define RK3588_HDMI0_GRANT_SEL BIT(10) + +#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16) +#define HOTPLUG_DEBOUNCE_MS 150 + +struct rockchip_hdmi_qp { + struct device *dev; + struct regmap *regmap; + struct regmap *vo_regmap; + struct rockchip_encoder encoder; + struct clk *ref_clk; + struct dw_hdmi_qp *hdmi; + struct phy *phy; + struct gpio_desc *enable_gpio; + struct delayed_work hpd_work; +}; + +static struct rockchip_hdmi_qp *to_rockchip_hdmi_qp(struct drm_encoder *encoder) +{ + struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder); + + return container_of(rkencoder, struct rockchip_hdmi_qp, encoder); +} + +static void dw_hdmi_qp_rockchip_encoder_enable(struct drm_encoder *encoder) +{ + struct rockchip_hdmi_qp *hdmi = to_rockchip_hdmi_qp(encoder); + struct drm_crtc *crtc = encoder->crtc; + unsigned long long rate; + + /* Unconditionally switch to TMDS as FRL is not yet supported */ + gpiod_set_value(hdmi->enable_gpio, 1); + + if (crtc && crtc->state) { + rate = drm_hdmi_compute_mode_clock(&crtc->state->adjusted_mode, + 8, HDMI_COLORSPACE_RGB); + clk_set_rate(hdmi->ref_clk, rate); + /* + * FIXME: Temporary workaround to pass pixel clock rate + * to the PHY driver until phy_configure_opts_hdmi + * becomes available in the PHY API. 
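Further down in this file, HPD handling is split across devm_request_threaded_irq(): the hard handler only verifies that the interrupt is really pending and masks it, while the threaded handler clears the status, schedules the debounced hotplug work and unmasks again. A standalone model of that flow, with the kernel primitives reduced to booleans and a printf():

#include <stdbool.h>
#include <stdio.h>

struct hpd {
	bool pending;	/* RK3588_GRF_SOC_STATUS1 stand-in */
	bool masked;	/* RK3588_HDMI0_HPD_INT_MSK stand-in */
};

/* hard handler: minimal work, runs with interrupts off */
static bool hardirq(struct hpd *h)
{
	if (!h->pending)
		return false;	/* IRQ_NONE: shared line, not ours */
	h->masked = true;	/* silence the source */
	return true;		/* IRQ_WAKE_THREAD */
}

/* threaded handler: may sleep, so ack, debounce and re-arm here */
static void thread_fn(struct hpd *h)
{
	h->pending = false;	/* ack (HPD_INT_CLR) */
	printf("mod_delayed_work(): hotplug event in 150 ms\n");
	h->masked = false;	/* unmask */
}

int main(void)
{
	struct hpd h = { .pending = true };

	if (hardirq(&h))
		thread_fn(&h);
	return 0;
}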
See also the related + * comment in rk_hdptx_phy_power_on() from + * drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c + */ + phy_set_bus_width(hdmi->phy, div_u64(rate, 100)); + } +} + +static int +dw_hdmi_qp_rockchip_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); + + s->output_mode = ROCKCHIP_OUT_MODE_AAAA; + s->output_type = DRM_MODE_CONNECTOR_HDMIA; + + return 0; +} + +static const struct +drm_encoder_helper_funcs dw_hdmi_qp_rockchip_encoder_helper_funcs = { + .enable = dw_hdmi_qp_rockchip_encoder_enable, + .atomic_check = dw_hdmi_qp_rockchip_encoder_atomic_check, +}; + +static int dw_hdmi_qp_rk3588_phy_init(struct dw_hdmi_qp *dw_hdmi, void *data) +{ + struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data; + + return phy_power_on(hdmi->phy); +} + +static void dw_hdmi_qp_rk3588_phy_disable(struct dw_hdmi_qp *dw_hdmi, + void *data) +{ + struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data; + + phy_power_off(hdmi->phy); +} + +static enum drm_connector_status +dw_hdmi_qp_rk3588_read_hpd(struct dw_hdmi_qp *dw_hdmi, void *data) +{ + struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data; + u32 val; + + regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &val); + + return val & RK3588_HDMI0_LEVEL_INT ? + connector_status_connected : connector_status_disconnected; +} + +static void dw_hdmi_qp_rk3588_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data) +{ + struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data; + + regmap_write(hdmi->regmap, + RK3588_GRF_SOC_CON2, + HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR, + RK3588_HDMI0_HPD_INT_CLR | + RK3588_HDMI0_HPD_INT_MSK)); +} + +static const struct dw_hdmi_qp_phy_ops rk3588_hdmi_phy_ops = { + .init = dw_hdmi_qp_rk3588_phy_init, + .disable = dw_hdmi_qp_rk3588_phy_disable, + .read_hpd = dw_hdmi_qp_rk3588_read_hpd, + .setup_hpd = dw_hdmi_qp_rk3588_setup_hpd, +}; + +static void dw_hdmi_qp_rk3588_hpd_work(struct work_struct *work) +{ + struct rockchip_hdmi_qp *hdmi = container_of(work, + struct rockchip_hdmi_qp, + hpd_work.work); + struct drm_device *drm = hdmi->encoder.encoder.dev; + bool changed; + + if (drm) { + changed = drm_helper_hpd_irq_event(drm); + if (changed) + drm_dbg(hdmi, "connector status changed\n"); + } +} + +static irqreturn_t dw_hdmi_qp_rk3588_hardirq(int irq, void *dev_id) +{ + struct rockchip_hdmi_qp *hdmi = dev_id; + u32 intr_stat, val; + + regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &intr_stat); + + if (intr_stat) { + val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, + RK3588_HDMI0_HPD_INT_MSK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); + return IRQ_WAKE_THREAD; + } + + return IRQ_NONE; +} + +static irqreturn_t dw_hdmi_qp_rk3588_irq(int irq, void *dev_id) +{ + struct rockchip_hdmi_qp *hdmi = dev_id; + u32 intr_stat, val; + + regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &intr_stat); + if (!intr_stat) + return IRQ_NONE; + + val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR, + RK3588_HDMI0_HPD_INT_CLR); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); + + mod_delayed_work(system_wq, &hdmi->hpd_work, + msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS)); + + val |= HIWORD_UPDATE(0, RK3588_HDMI0_HPD_INT_MSK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); + + return IRQ_HANDLED; +} + +static const struct of_device_id dw_hdmi_qp_rockchip_dt_ids[] = { + { .compatible = "rockchip,rk3588-dw-hdmi-qp", + .data = &rk3588_hdmi_phy_ops }, + {}, 
+}; +MODULE_DEVICE_TABLE(of, dw_hdmi_qp_rockchip_dt_ids); + +static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, + void *data) +{ + static const char * const clk_names[] = { + "pclk", "earc", "aud", "hdp", "hclk_vo1", + "ref" /* keep "ref" last */ + }; + struct platform_device *pdev = to_platform_device(dev); + struct dw_hdmi_qp_plat_data plat_data; + struct drm_device *drm = data; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct rockchip_hdmi_qp *hdmi; + struct clk *clk; + int ret, irq, i; + u32 val; + + if (!pdev->dev.of_node) + return -ENODEV; + + hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return -ENOMEM; + + plat_data.phy_ops = of_device_get_match_data(dev); + if (!plat_data.phy_ops) + return -ENODEV; + + plat_data.phy_data = hdmi; + hdmi->dev = &pdev->dev; + + encoder = &hdmi->encoder.encoder; + encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); + + rockchip_drm_encoder_set_crtc_endpoint_id(&hdmi->encoder, + dev->of_node, 0, 0); + /* + * If we failed to find the CRTC(s) which this encoder is + * supposed to be connected to, it's because the CRTC has + * not been registered yet. Defer probing, and hope that + * the required CRTC is added later. + */ + if (encoder->possible_crtcs == 0) + return -EPROBE_DEFER; + + hdmi->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, + "rockchip,grf"); + if (IS_ERR(hdmi->regmap)) { + drm_err(hdmi, "Unable to get rockchip,grf\n"); + return PTR_ERR(hdmi->regmap); + } + + hdmi->vo_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, + "rockchip,vo-grf"); + if (IS_ERR(hdmi->vo_regmap)) { + drm_err(hdmi, "Unable to get rockchip,vo-grf\n"); + return PTR_ERR(hdmi->vo_regmap); + } + + for (i = 0; i < ARRAY_SIZE(clk_names); i++) { + clk = devm_clk_get_enabled(hdmi->dev, clk_names[i]); + + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + if (ret != -EPROBE_DEFER) + drm_err(hdmi, "Failed to get %s clock: %d\n", + clk_names[i], ret); + return ret; + } + } + hdmi->ref_clk = clk; + + hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable", + GPIOD_OUT_HIGH); + if (IS_ERR(hdmi->enable_gpio)) { + ret = PTR_ERR(hdmi->enable_gpio); + drm_err(hdmi, "Failed to request enable GPIO: %d\n", ret); + return ret; + } + + hdmi->phy = devm_of_phy_get_by_index(dev, dev->of_node, 0); + if (IS_ERR(hdmi->phy)) { + ret = PTR_ERR(hdmi->phy); + if (ret != -EPROBE_DEFER) + drm_err(hdmi, "failed to get phy: %d\n", ret); + return ret; + } + + val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) | + HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) | + HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) | + HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK); + regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON3, val); + + val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK, + RK3588_SET_HPD_PATH_MASK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val); + + val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL, + RK3588_HDMI0_GRANT_SEL); + regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val); + + val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); + + INIT_DELAYED_WORK(&hdmi->hpd_work, dw_hdmi_qp_rk3588_hpd_work); + + plat_data.main_irq = platform_get_irq_byname(pdev, "main"); + if (plat_data.main_irq < 0) + return plat_data.main_irq; + + irq = platform_get_irq_byname(pdev, "hpd"); + if (irq < 0) + return irq; + + ret = devm_request_threaded_irq(hdmi->dev, irq, + dw_hdmi_qp_rk3588_hardirq, + 
dw_hdmi_qp_rk3588_irq, + IRQF_SHARED, "dw-hdmi-qp-hpd", + hdmi); + if (ret) + return ret; + + drm_encoder_helper_add(encoder, &dw_hdmi_qp_rockchip_encoder_helper_funcs); + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); + + platform_set_drvdata(pdev, hdmi); + + hdmi->hdmi = dw_hdmi_qp_bind(pdev, encoder, &plat_data); + if (IS_ERR(hdmi->hdmi)) { + ret = PTR_ERR(hdmi->hdmi); + drm_encoder_cleanup(encoder); + return ret; + } + + connector = drm_bridge_connector_init(drm, encoder); + if (IS_ERR(connector)) { + ret = PTR_ERR(connector); + drm_err(hdmi, "failed to init bridge connector: %d\n", ret); + return ret; + } + + return drm_connector_attach_encoder(connector, encoder); +} + +static void dw_hdmi_qp_rockchip_unbind(struct device *dev, + struct device *master, + void *data) +{ + struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev); + + cancel_delayed_work_sync(&hdmi->hpd_work); + + drm_encoder_cleanup(&hdmi->encoder.encoder); +} + +static const struct component_ops dw_hdmi_qp_rockchip_ops = { + .bind = dw_hdmi_qp_rockchip_bind, + .unbind = dw_hdmi_qp_rockchip_unbind, +}; + +static int dw_hdmi_qp_rockchip_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &dw_hdmi_qp_rockchip_ops); +} + +static void dw_hdmi_qp_rockchip_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &dw_hdmi_qp_rockchip_ops); +} + +static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev) +{ + struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev); + u32 val; + + val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) | + HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) | + HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) | + HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK); + regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON3, val); + + val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK, + RK3588_SET_HPD_PATH_MASK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val); + + val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL, + RK3588_HDMI0_GRANT_SEL); + regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val); + + dw_hdmi_qp_resume(dev, hdmi->hdmi); + + if (hdmi->encoder.encoder.dev) + drm_helper_hpd_irq_event(hdmi->encoder.encoder.dev); + + return 0; +} + +static const struct dev_pm_ops dw_hdmi_qp_rockchip_pm = { + SET_SYSTEM_SLEEP_PM_OPS(NULL, dw_hdmi_qp_rockchip_resume) +}; + +struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver = { + .probe = dw_hdmi_qp_rockchip_probe, + .remove = dw_hdmi_qp_rockchip_remove, + .driver = { + .name = "dwhdmiqp-rockchip", + .pm = &dw_hdmi_qp_rockchip_pm, + .of_match_table = dw_hdmi_qp_rockchip_dt_ids, + }, +}; diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index 42ef62aa0a1e..b58e2a29294b 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -1017,7 +1017,7 @@ MODULE_DEVICE_TABLE(of, inno_hdmi_dt_ids); struct platform_driver inno_hdmi_driver = { .probe = inno_hdmi_probe, - .remove_new = inno_hdmi_remove, + .remove = inno_hdmi_remove, .driver = { .name = "innohdmi-rockchip", .of_match_table = inno_hdmi_dt_ids, diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c index 784de990da1b..b0fc8ace2e41 100644 --- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c +++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c @@ -858,7 +858,7 @@ MODULE_DEVICE_TABLE(of, rk3066_hdmi_dt_ids); struct platform_driver rk3066_hdmi_driver = { .probe = rk3066_hdmi_probe, - .remove_new = rk3066_hdmi_remove, + .remove = rk3066_hdmi_remove, 
.driver = { .name = "rockchip-rk3066-hdmi", .of_match_table = rk3066_hdmi_dt_ids, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 4e080040ba01..32d8394c4c49 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -6,6 +6,7 @@ * based on exynos_drm_drv.c */ +#include <linux/aperture.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -16,7 +17,6 @@ #include <linux/console.h> #include <linux/iommu.h> -#include <drm/drm_aperture.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> @@ -146,7 +146,7 @@ static int rockchip_drm_bind(struct device *dev) int ret; /* Remove existing drivers that may own the framebuffer memory. */ - ret = drm_aperture_remove_framebuffers(&rockchip_drm_driver); + ret = aperture_remove_all_conflicting_devices(rockchip_drm_driver.name); if (ret) { DRM_DEV_ERROR(dev, "Failed to remove existing framebuffers - %d.\n", @@ -500,7 +500,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids); static struct platform_driver rockchip_drm_platform_driver = { .probe = rockchip_drm_platform_probe, - .remove_new = rockchip_drm_platform_remove, + .remove = rockchip_drm_platform_remove, .shutdown = rockchip_drm_platform_shutdown, .driver = { .name = "rockchip-drm", @@ -532,6 +532,8 @@ static int __init rockchip_drm_init(void) ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP); ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver, CONFIG_ROCKCHIP_DW_HDMI); + ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_qp_rockchip_pltfm_driver, + CONFIG_ROCKCHIP_DW_HDMI_QP); ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver, CONFIG_ROCKCHIP_DW_MIPI_DSI); ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 8d566fcd80a2..24b4ce5ceaf1 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -88,6 +88,7 @@ int rockchip_drm_encoder_set_crtc_endpoint_id(struct rockchip_encoder *rencoder, int rockchip_drm_endpoint_is_subdriver(struct device_node *ep); extern struct platform_driver cdn_dp_driver; extern struct platform_driver dw_hdmi_rockchip_pltfm_driver; +extern struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver; extern struct platform_driver dw_mipi_dsi_rockchip_driver; extern struct platform_driver inno_hdmi_driver; extern struct platform_driver rockchip_dp_driver; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index f161f40d8ce4..69900138295b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1093,10 +1093,10 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane, if (!plane->state->fb) return -EINVAL; - if (state) - crtc_state = drm_atomic_get_existing_crtc_state(state, - new_plane_state->crtc); - else /* Special case for asynchronous cursor updates. */ + crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc); + + /* Special case for asynchronous cursor updates. 
*/ + if (!crtc_state) crtc_state = plane->crtc->state; return drm_atomic_helper_check_plane_state(plane->state, crtc_state, diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index 9a01aa450741..aba733736ff7 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -746,7 +746,7 @@ static void rockchip_lvds_remove(struct platform_device *pdev) struct platform_driver rockchip_lvds_driver = { .probe = rockchip_lvds_probe, - .remove_new = rockchip_lvds_remove, + .remove = rockchip_lvds_remove, .driver = { .name = "rockchip-lvds", .of_match_table = rockchip_lvds_dt_ids, diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c index 18efb3fe1c00..f9d87a0abc8b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c @@ -501,7 +501,7 @@ static void vop2_remove(struct platform_device *pdev) struct platform_driver vop2_platform_driver = { .probe = vop2_probe, - .remove_new = vop2_remove, + .remove = vop2_remove, .driver = { .name = "rockchip-vop2", .of_match_table = vop2_dt_match, diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index e2c6ba26f437..8998967f0c00 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -1284,7 +1284,7 @@ static void vop_remove(struct platform_device *pdev) struct platform_driver vop_platform_driver = { .probe = vop_probe, - .remove_new = vop_remove, + .remove = vop_remove, .driver = { .name = "rockchip-vop", .of_match_table = vop_driver_dt_match, diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 2951fcc2e6b1..69bcf0e99d57 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -105,7 +105,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, /* We start in an idle state. 
*/ complete_all(&entity->entity_idle); - spin_lock_init(&entity->rq_lock); + spin_lock_init(&entity->lock); spsc_queue_init(&entity->job_queue); atomic_set(&entity->fence_seq, 0); @@ -133,10 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, { WARN_ON(!num_sched_list || !sched_list); - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); entity->sched_list = sched_list; entity->num_sched_list = num_sched_list; - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); } EXPORT_SYMBOL(drm_sched_entity_modify_sched); @@ -244,10 +244,10 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity) if (!entity->rq) return; - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); entity->stopped = true; drm_sched_rq_remove_entity(entity->rq, entity); - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); /* Make sure this entity is not used by the scheduler at the moment */ wait_for_completion(&entity->entity_idle); @@ -396,9 +396,9 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, void drm_sched_entity_set_priority(struct drm_sched_entity *entity, enum drm_sched_priority priority) { - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); entity->priority = priority; - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); } EXPORT_SYMBOL(drm_sched_entity_set_priority); @@ -514,8 +514,17 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) struct drm_sched_job *next; next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); - if (next) - drm_sched_rq_update_fifo(entity, next->submit_ts); + if (next) { + struct drm_sched_rq *rq; + + spin_lock(&entity->lock); + rq = entity->rq; + spin_lock(&rq->lock); + drm_sched_rq_update_fifo_locked(entity, rq, + next->submit_ts); + spin_unlock(&rq->lock); + spin_unlock(&entity->lock); + } } /* Jobs and entities might have different lifecycles. Since we're @@ -555,14 +564,14 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity) if (fence && !dma_fence_is_signaled(fence)) return; - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list); rq = sched ? 
sched->sched_rq[entity->priority] : NULL; if (rq != entity->rq) { drm_sched_rq_remove_entity(entity->rq, entity); entity->rq = rq; } - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); if (entity->num_sched_list == 1) entity->sched_list = NULL; @@ -601,9 +610,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) struct drm_sched_rq *rq; /* Add the entity to the run queue */ - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); if (entity->stopped) { - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); DRM_ERROR("Trying to push to a killed entity\n"); return; @@ -612,11 +621,14 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) rq = entity->rq; sched = rq->sched; + spin_lock(&rq->lock); drm_sched_rq_add_entity(rq, entity); - spin_unlock(&entity->rq_lock); if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) - drm_sched_rq_update_fifo(entity, submit_ts); + drm_sched_rq_update_fifo_locked(entity, rq, submit_ts); + + spin_unlock(&rq->lock); + spin_unlock(&entity->lock); drm_sched_wakeup(sched); } diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 36db5c7736fc..7ce25281c74c 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -87,6 +87,12 @@ #define CREATE_TRACE_POINTS #include "gpu_scheduler_trace.h" +#ifdef CONFIG_LOCKDEP +static struct lockdep_map drm_sched_lockdep_map = { + .name = "drm_sched_lockdep_map" +}; +#endif + #define to_drm_sched_job(sched_job) \ container_of((sched_job), struct drm_sched_job, queue_node) @@ -153,35 +159,33 @@ static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a, return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting); } -static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity) +static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity, + struct drm_sched_rq *rq) { - struct drm_sched_rq *rq = entity->rq; - if (!RB_EMPTY_NODE(&entity->rb_tree_node)) { rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root); RB_CLEAR_NODE(&entity->rb_tree_node); } } -void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts) +void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity, + struct drm_sched_rq *rq, + ktime_t ts) { /* * Both locks need to be grabbed, one to protect from entity->rq change * for entity from within concurrent drm_sched_entity_select_rq and the * other to update the rb tree structure. 
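 *
 * Reviewer note: after this rework the helpers only assert lock ownership
 * via lockdep_assert_held(); callers must take the locks themselves, in
 * the fixed order entity->lock (outer, stabilizes entity->rq) followed by
 * rq->lock (inner, protects the rb tree and entity list). The pop-job
 * path above follows exactly this shape:
 *
 *	spin_lock(&entity->lock);
 *	rq = entity->rq;
 *	spin_lock(&rq->lock);
 *	drm_sched_rq_update_fifo_locked(entity, rq, ts);
 *	spin_unlock(&rq->lock);
 *	spin_unlock(&entity->lock);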
*/ - spin_lock(&entity->rq_lock); - spin_lock(&entity->rq->lock); + lockdep_assert_held(&entity->lock); + lockdep_assert_held(&rq->lock); - drm_sched_rq_remove_fifo_locked(entity); + drm_sched_rq_remove_fifo_locked(entity, rq); entity->oldest_job_waiting = ts; - rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root, + rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root, drm_sched_entity_compare_before); - - spin_unlock(&entity->rq->lock); - spin_unlock(&entity->rq_lock); } /** @@ -213,15 +217,14 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, void drm_sched_rq_add_entity(struct drm_sched_rq *rq, struct drm_sched_entity *entity) { + lockdep_assert_held(&entity->lock); + lockdep_assert_held(&rq->lock); + if (!list_empty(&entity->list)) return; - spin_lock(&rq->lock); - atomic_inc(rq->sched->score); list_add_tail(&entity->list, &rq->entities); - - spin_unlock(&rq->lock); } /** @@ -235,6 +238,8 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq, void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, struct drm_sched_entity *entity) { + lockdep_assert_held(&entity->lock); + if (list_empty(&entity->list)) return; @@ -247,7 +252,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, rq->current_entity = NULL; if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) - drm_sched_rq_remove_fifo_locked(entity); + drm_sched_rq_remove_fifo_locked(entity, rq); spin_unlock(&rq->lock); } @@ -349,7 +354,6 @@ drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched, return ERR_PTR(-ENOSPC); } - rq->current_entity = entity; reinit_completion(&entity->entity_idle); break; } @@ -595,6 +599,9 @@ static void drm_sched_job_timedout(struct work_struct *work) * callers responsibility to release it manually if it's not part of the * pending list any more. * + * This function is typically used for reset recovery (see the docu of + * drm_sched_backend_ops.timedout_job() for details). Do not call it for + * scheduler teardown, i.e., before calling drm_sched_fini(). */ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) { @@ -667,7 +674,6 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) */ cancel_delayed_work(&sched->work_tdr); } - EXPORT_SYMBOL(drm_sched_stop); /** @@ -676,6 +682,10 @@ EXPORT_SYMBOL(drm_sched_stop); * @sched: scheduler instance * @errno: error to set on the pending fences * + * This function is typically used for reset recovery (see the docu of + * drm_sched_backend_ops.timedout_job() for details). Do not call it for + * scheduler startup. The scheduler itself is fully operational after + * drm_sched_init() succeeded. */ void drm_sched_start(struct drm_gpu_scheduler *sched, int errno) { @@ -773,6 +783,10 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs); * Drivers must make sure drm_sched_job_cleanup() if this function returns * successfully, even when @job is aborted before drm_sched_job_arm() is called. * + * Note that this function does not assign a valid value to each struct member + * of struct drm_sched_job. Take a look at that struct's documentation to see + * who sets which struct member with what lifetime. + * * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware * has died, which can mean that there's no valid runqueue for a @entity. * This function returns -ENOENT in this case (which probably should be -EIO as @@ -798,6 +812,14 @@ int drm_sched_job_init(struct drm_sched_job *job, return -EINVAL; } + /* + * We don't know for sure how the user has allocated. 
Thus, zero the + * struct so that unallowed (i.e., too early) usage of pointers that + * this function does not set is guaranteed to lead to a NULL pointer + * exception instead of UB. + */ + memset(job, 0, sizeof(*job)); + job->entity = entity; job->credits = credits; job->s_fence = drm_sched_fence_alloc(entity, owner); @@ -1270,7 +1292,13 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, sched->submit_wq = submit_wq; sched->own_submit_wq = false; } else { - sched->submit_wq = alloc_ordered_workqueue(name, 0); +#ifdef CONFIG_LOCKDEP + sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, + WQ_MEM_RECLAIM, + &drm_sched_lockdep_map); +#else + sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); +#endif if (!sched->submit_wq) return -ENOMEM; @@ -1322,6 +1350,19 @@ EXPORT_SYMBOL(drm_sched_init); * @sched: scheduler instance * * Tears down and cleans up the scheduler. + * + * This stops submission of new jobs to the hardware through + * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job() + * will not be called for all jobs still in drm_gpu_scheduler.pending_list. + * There is no solution for this currently. Thus, it is up to the driver to make + * sure that + * a) drm_sched_fini() is only called after for all submitted jobs + * drm_sched_backend_ops.free_job() has been called or that + * b) the jobs for which drm_sched_backend_ops.free_job() has not been called + * after drm_sched_fini() ran are freed manually. + * + * FIXME: Take care of the above problem and prevent this function from leaking + * the jobs in drm_gpu_scheduler.pending_list under any circumstances. */ void drm_sched_fini(struct drm_gpu_scheduler *sched) { @@ -1417,8 +1458,10 @@ EXPORT_SYMBOL(drm_sched_wqueue_ready); /** * drm_sched_wqueue_stop - stop scheduler submission - * * @sched: scheduler instance + * + * Stops the scheduler from pulling new jobs from entities. It also stops + * freeing jobs automatically through drm_sched_backend_ops.free_job(). */ void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched) { @@ -1430,8 +1473,12 @@ EXPORT_SYMBOL(drm_sched_wqueue_stop); /** * drm_sched_wqueue_start - start scheduler submission - * * @sched: scheduler instance + * + * Restarts the scheduler after drm_sched_wqueue_stop() has stopped it. + * + * This function is not necessary for 'conventional' startup. The scheduler is + * fully operational after drm_sched_init() succeeded. 
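 *
 * Reviewer note: stop/start are meant to bracket driver-side work such as
 * reset handling. A hedged sketch of the pairing (the middle step is
 * driver-specific and purely illustrative):
 *
 *	drm_sched_wqueue_stop(sched);
 *	example_hw_recover(edev);	// hypothetical driver helper
 *	drm_sched_wqueue_start(sched);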
*/ void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched) { diff --git a/drivers/gpu/drm/solomon/ssd130x-i2c.c b/drivers/gpu/drm/solomon/ssd130x-i2c.c index f2ccab9c06d9..941a2eb44c57 100644 --- a/drivers/gpu/drm/solomon/ssd130x-i2c.c +++ b/drivers/gpu/drm/solomon/ssd130x-i2c.c @@ -123,4 +123,4 @@ module_i2c_driver(ssd130x_i2c_driver); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Javier Martinez Canillas <javierm@redhat.com>"); MODULE_LICENSE("GPL v2"); -MODULE_IMPORT_NS(DRM_SSD130X); +MODULE_IMPORT_NS("DRM_SSD130X"); diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c index 84bfde31d172..08334be38694 100644 --- a/drivers/gpu/drm/solomon/ssd130x-spi.c +++ b/drivers/gpu/drm/solomon/ssd130x-spi.c @@ -192,4 +192,4 @@ module_spi_driver(ssd130x_spi_driver); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Javier Martinez Canillas <javierm@redhat.com>"); MODULE_LICENSE("GPL"); -MODULE_IMPORT_NS(DRM_SSD130X); +MODULE_IMPORT_NS("DRM_SSD130X"); diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index 29b2f82d81f8..486d8f5282f9 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -209,7 +209,7 @@ const struct ssd130x_deviceinfo ssd130x_variants[] = { .family_id = SSD133X_FAMILY, } }; -EXPORT_SYMBOL_NS_GPL(ssd130x_variants, DRM_SSD130X); +EXPORT_SYMBOL_NS_GPL(ssd130x_variants, "DRM_SSD130X"); struct ssd130x_crtc_state { struct drm_crtc_state base; diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c index deb3bb96e2a8..cb2816985305 100644 --- a/drivers/gpu/drm/sprd/sprd_dpu.c +++ b/drivers/gpu/drm/sprd/sprd_dpu.c @@ -866,7 +866,7 @@ static void sprd_dpu_remove(struct platform_device *pdev) struct platform_driver sprd_dpu_driver = { .probe = sprd_dpu_probe, - .remove_new = sprd_dpu_remove, + .remove = sprd_dpu_remove, .driver = { .name = "sprd-dpu-drv", .of_match_table = dpu_match_table, diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c index a74cd0caf645..bc1c747d3ea4 100644 --- a/drivers/gpu/drm/sprd/sprd_drm.c +++ b/drivers/gpu/drm/sprd/sprd_drm.c @@ -163,7 +163,7 @@ MODULE_DEVICE_TABLE(of, drm_match_table); static struct platform_driver sprd_drm_driver = { .probe = sprd_drm_probe, - .remove_new = sprd_drm_remove, + .remove = sprd_drm_remove, .shutdown = sprd_drm_shutdown, .driver = { .name = "sprd-drm-drv", diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c index 0b69c140eab3..8fc26479bb6b 100644 --- a/drivers/gpu/drm/sprd/sprd_dsi.c +++ b/drivers/gpu/drm/sprd/sprd_dsi.c @@ -209,7 +209,7 @@ static int regmap_tst_io_read(void *context, u32 reg, u32 *val) return 0; } -static struct regmap_bus regmap_tst_io = { +static const struct regmap_bus regmap_tst_io = { .reg_write = regmap_tst_io_write, .reg_read = regmap_tst_io_read, }; @@ -1060,7 +1060,7 @@ static void sprd_dsi_remove(struct platform_device *pdev) struct platform_driver sprd_dsi_driver = { .probe = sprd_dsi_probe, - .remove_new = sprd_dsi_remove, + .remove = sprd_dsi_remove, .driver = { .name = "sprd-dsi-drv", .of_match_table = dsi_match_table, diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c index 33487a1fed8f..063f82d23d80 100644 --- a/drivers/gpu/drm/sti/sti_compositor.c +++ b/drivers/gpu/drm/sti/sti_compositor.c @@ -269,7 +269,7 @@ struct platform_driver sti_compositor_driver = { .of_match_table = compositor_of_match, }, .probe = sti_compositor_probe, - .remove_new = sti_compositor_remove, + .remove = 
sti_compositor_remove, }; MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 65f180c8e8e2..61ceff9aee7e 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -270,7 +270,7 @@ MODULE_DEVICE_TABLE(of, sti_dt_ids); static struct platform_driver sti_platform_driver = { .probe = sti_platform_probe, - .remove_new = sti_platform_remove, + .remove = sti_platform_remove, .shutdown = sti_platform_shutdown, .driver = { .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index 68b8197b3dd1..c6c2abaa1891 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -585,7 +585,7 @@ struct platform_driver sti_dvo_driver = { .of_match_table = dvo_of_match, }, .probe = sti_dvo_probe, - .remove_new = sti_dvo_remove, + .remove = sti_dvo_remove, }; MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index f18faad974aa..b12863bea955 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -810,7 +810,7 @@ struct platform_driver sti_hda_driver = { .of_match_table = hda_of_match, }, .probe = sti_hda_probe, - .remove_new = sti_hda_remove, + .remove = sti_hda_remove, }; MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 847470f747c0..21b46a6465f0 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -1492,7 +1492,7 @@ struct platform_driver sti_hdmi_driver = { .of_match_table = hdmi_of_match, }, .probe = sti_hdmi_probe, - .remove_new = sti_hdmi_remove, + .remove = sti_hdmi_remove, }; MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 5793cf2cb897..0f658709c9d0 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -1420,7 +1420,7 @@ struct platform_driver sti_hqvdp_driver = { .of_match_table = hqvdp_of_match, }, .probe = sti_hqvdp_probe, - .remove_new = sti_hqvdp_remove, + .remove = sti_hqvdp_remove, }; MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c index 7e5f14646625..06c1b81912f7 100644 --- a/drivers/gpu/drm/sti/sti_mixer.c +++ b/drivers/gpu/drm/sti/sti_mixer.c @@ -137,7 +137,7 @@ static void mixer_dbg_crb(struct seq_file *s, int val) } } -static void mixer_dbg_mxn(struct seq_file *s, void *addr) +static void mixer_dbg_mxn(struct seq_file *s, void __iomem *addr) { int i; diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index e714c232026c..af6c06f448c4 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -889,7 +889,7 @@ struct platform_driver sti_tvout_driver = { .of_match_table = tvout_of_match, }, .probe = sti_tvout_probe, - .remove_new = sti_tvout_remove, + .remove = sti_tvout_remove, }; MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index fee274a60623..bf090a354989 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -8,6 +8,7 @@ * Mickael Reulier <mickael.reulier@st.com> */ +#include <linux/aperture.h> #include <linux/component.h> #include <linux/dma-mapping.h> #include <linux/mod_devicetable.h> @@ -15,7 +16,6 
@@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> @@ -191,7 +191,7 @@ static int stm_drm_platform_probe(struct platform_device *pdev) DRM_DEBUG("%s\n", __func__); - ret = drm_aperture_remove_framebuffers(&drv_driver); + ret = aperture_remove_all_conflicting_devices(drv_driver.name); if (ret) return ret; @@ -245,7 +245,7 @@ MODULE_DEVICE_TABLE(of, drv_dt_ids); static struct platform_driver stm_drm_platform_driver = { .probe = stm_drm_platform_probe, - .remove_new = stm_drm_platform_remove, + .remove = stm_drm_platform_remove, .shutdown = stm_drm_platform_shutdown, .driver = { .name = "stm32-display", diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c index b20123854c4a..2c7bc064bc66 100644 --- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c +++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c @@ -783,7 +783,7 @@ static const struct dev_pm_ops dw_mipi_dsi_stm_pm_ops = { static struct platform_driver dw_mipi_dsi_stm_driver = { .probe = dw_mipi_dsi_stm_probe, - .remove_new = dw_mipi_dsi_stm_remove, + .remove = dw_mipi_dsi_stm_remove, .driver = { .of_match_table = dw_mipi_dsi_stm_dt_ids, .name = "stm32-display-dsi", diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index e89eb96d3131..2dded3b828df 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -1028,7 +1028,7 @@ MODULE_DEVICE_TABLE(of, sun4i_backend_of_table); static struct platform_driver sun4i_backend_platform_driver = { .probe = sun4i_backend_probe, - .remove_new = sun4i_backend_remove, + .remove = sun4i_backend_remove, .driver = { .name = "sun4i-backend", .of_match_table = sun4i_backend_of_table, diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index c3342789e3b0..5eccf58f2e17 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -6,6 +6,7 @@ * Maxime Ripard <maxime.ripard@free-electrons.com> */ +#include <linux/aperture.h> #include <linux/component.h> #include <linux/dma-mapping.h> #include <linux/kfifo.h> @@ -14,7 +15,6 @@ #include <linux/of_reserved_mem.h> #include <linux/platform_device.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> @@ -100,7 +100,7 @@ static int sun4i_drv_bind(struct device *dev) goto unbind_all; /* Remove early framebuffers (ie. 
simplefb) */ - ret = drm_aperture_remove_framebuffers(&sun4i_drv_driver); + ret = aperture_remove_all_conflicting_devices(sun4i_drv_driver.name); if (ret) goto unbind_all; @@ -443,7 +443,7 @@ MODULE_DEVICE_TABLE(of, sun4i_drv_of_table); static struct platform_driver sun4i_drv_platform_driver = { .probe = sun4i_drv_probe, - .remove_new = sun4i_drv_remove, + .remove = sun4i_drv_remove, .shutdown = sun4i_drv_shutdown, .driver = { .name = "sun4i-drm", diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c index 280d444dbb66..5ab1604f12dd 100644 --- a/drivers/gpu/drm/sun4i/sun4i_frontend.c +++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c @@ -717,7 +717,7 @@ MODULE_DEVICE_TABLE(of, sun4i_frontend_of_table); static struct platform_driver sun4i_frontend_driver = { .probe = sun4i_frontend_probe, - .remove_new = sun4i_frontend_remove, + .remove = sun4i_frontend_remove, .driver = { .name = "sun4i-frontend", .of_match_table = sun4i_frontend_of_table, diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index b3649449de30..453f19f16ab7 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -741,7 +741,7 @@ MODULE_DEVICE_TABLE(of, sun4i_hdmi_of_table); static struct platform_driver sun4i_hdmi_driver = { .probe = sun4i_hdmi_probe, - .remove_new = sun4i_hdmi_remove, + .remove = sun4i_hdmi_remove, .driver = { .name = "sun4i-hdmi", .of_match_table = sun4i_hdmi_of_table, diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index a1a2c845ade0..960e83c8291d 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -1568,7 +1568,7 @@ EXPORT_SYMBOL(sun4i_tcon_of_table); static struct platform_driver sun4i_tcon_platform_driver = { .probe = sun4i_tcon_probe, - .remove_new = sun4i_tcon_remove, + .remove = sun4i_tcon_remove, .driver = { .name = "sun4i-tcon", .of_match_table = sun4i_tcon_of_table, diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c index ec65d9d59de7..cce4e38789b9 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tv.c +++ b/drivers/gpu/drm/sun4i/sun4i_tv.c @@ -559,7 +559,7 @@ MODULE_DEVICE_TABLE(of, sun4i_tv_of_table); static struct platform_driver sun4i_tv_platform_driver = { .probe = sun4i_tv_probe, - .remove_new = sun4i_tv_remove, + .remove = sun4i_tv_remove, .driver = { .name = "sun4i-tve", .of_match_table = sun4i_tv_of_table, diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c index 0d342f43fa93..310c7e0daede 100644 --- a/drivers/gpu/drm/sun4i/sun6i_drc.c +++ b/drivers/gpu/drm/sun4i/sun6i_drc.c @@ -112,7 +112,7 @@ MODULE_DEVICE_TABLE(of, sun6i_drc_of_table); static struct platform_driver sun6i_drc_platform_driver = { .probe = sun6i_drc_probe, - .remove_new = sun6i_drc_remove, + .remove = sun6i_drc_remove, .driver = { .name = "sun6i-drc", .of_match_table = sun6i_drc_of_table, diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 4abf4f102007..c35b70d83e53 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -1244,7 +1244,7 @@ MODULE_DEVICE_TABLE(of, sun6i_dsi_of_table); static struct platform_driver sun6i_dsi_platform_driver = { .probe = sun6i_dsi_probe, - .remove_new = sun6i_dsi_remove, + .remove = sun6i_dsi_remove, .driver = { .name = "sun6i-mipi-dsi", .of_match_table = sun6i_dsi_of_table, diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c 
b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index 4727dfaa8fb9..96532709c2a7 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -264,7 +264,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dw_hdmi_dt_ids); static struct platform_driver sun8i_dw_hdmi_pltfm_driver = { .probe = sun8i_dw_hdmi_probe, - .remove_new = sun8i_dw_hdmi_remove, + .remove = sun8i_dw_hdmi_remove, .driver = { .name = "sun8i-dw-hdmi", .of_match_table = sun8i_dw_hdmi_dt_ids, diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index bd0fe2c6624e..8b41d33baa30 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -775,7 +775,7 @@ MODULE_DEVICE_TABLE(of, sun8i_mixer_of_table); static struct platform_driver sun8i_mixer_platform_driver = { .probe = sun8i_mixer_probe, - .remove_new = sun8i_mixer_remove, + .remove = sun8i_mixer_remove, .driver = { .name = "sun8i-mixer", .of_match_table = sun8i_mixer_of_table, diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c index a1ca3916f42b..8adda578c51b 100644 --- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c +++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c @@ -299,7 +299,7 @@ EXPORT_SYMBOL(sun8i_tcon_top_of_table); static struct platform_driver sun8i_tcon_top_platform_driver = { .probe = sun8i_tcon_top_probe, - .remove_new = sun8i_tcon_top_remove, + .remove = sun8i_tcon_top_remove, .driver = { .name = "sun8i-tcon-top", .of_match_table = sun8i_tcon_top_of_table, diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index be61c9d1a4f0..430b2eededb2 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -3286,5 +3286,5 @@ struct platform_driver tegra_dc_driver = { .of_match_table = tegra_dc_of_match, }, .probe = tegra_dc_probe, - .remove_new = tegra_dc_remove, + .remove = tegra_dc_remove, }; diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c index ae12d001a04b..2cd8dcb959c0 100644 --- a/drivers/gpu/drm/tegra/dpaux.c +++ b/drivers/gpu/drm/tegra/dpaux.c @@ -697,7 +697,7 @@ struct platform_driver tegra_dpaux_driver = { .pm = pm_ptr(&tegra_dpaux_pm_ops), }, .probe = tegra_dpaux_probe, - .remove_new = tegra_dpaux_remove, + .remove = tegra_dpaux_remove, }; struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np) diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index dd82ab71d4a9..bf3421667ecc 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -4,6 +4,7 @@ * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved. */ +#include <linux/aperture.h> #include <linux/bitops.h> #include <linux/host1x.h> #include <linux/idr.h> @@ -12,7 +13,6 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> @@ -1156,8 +1156,8 @@ static int host1x_drm_probe(struct host1x_device *dev) if (host1x_drm_wants_iommu(dev) && device_iommu_mapped(dma_dev)) { tegra->domain = iommu_paging_domain_alloc(dma_dev); - if (!tegra->domain) { - err = -ENOMEM; + if (IS_ERR(tegra->domain)) { + err = PTR_ERR(tegra->domain); goto free; } @@ -1258,7 +1258,7 @@ static int host1x_drm_probe(struct host1x_device *dev) * will not expose any modesetting features. 
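 *
 * Reviewer note on the aperture hunks throughout this series: the
 * DRM-specific drm_aperture_remove_framebuffers(&driver) wrapper is
 * replaced by the generic helpers from <linux/aperture.h>, which take
 * the requesting driver's name instead of a struct drm_driver pointer:
 *
 *	ret = aperture_remove_all_conflicting_devices(driver.name);
 *
 * and, for PCI devices,
 *
 *	ret = aperture_remove_conflicting_pci_devices(pdev, driver.name);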
*/ if (drm->mode_config.num_crtc > 0) { - err = drm_aperture_remove_framebuffers(&tegra_drm_driver); + err = aperture_remove_all_conflicting_devices(tegra_drm_driver.name); if (err < 0) goto hub; } else { diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index db606e151afc..4a8cd9ed0a94 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -1713,5 +1713,5 @@ struct platform_driver tegra_dsi_driver = { .of_match_table = tegra_dsi_of_match, }, .probe = tegra_dsi_probe, - .remove_new = tegra_dsi_remove, + .remove = tegra_dsi_remove, }; diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index d275404ad0e9..ace3e5a805cf 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -22,7 +22,7 @@ #include "drm.h" #include "gem.h" -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents) { diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c index a160d01f26e1..21f4dd0fa6af 100644 --- a/drivers/gpu/drm/tegra/gr2d.c +++ b/drivers/gpu/drm/tegra/gr2d.c @@ -394,5 +394,5 @@ struct platform_driver tegra_gr2d_driver = { .pm = &tegra_gr2d_pm, }, .probe = gr2d_probe, - .remove_new = gr2d_remove, + .remove = gr2d_remove, }; diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index 00c8564520e7..42e9656ab80c 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -46,6 +46,7 @@ struct gr3d { unsigned int nclocks; struct reset_control_bulk_data resets[RST_GR3D_MAX]; unsigned int nresets; + struct dev_pm_domain_list *pd_list; DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS); }; @@ -369,18 +370,13 @@ static int gr3d_power_up_legacy_domain(struct device *dev, const char *name, return 0; } -static void gr3d_del_link(void *link) -{ - device_link_del(link); -} - static int gr3d_init_power(struct device *dev, struct gr3d *gr3d) { - static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL }; - const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME; - struct device **opp_virt_devs, *pd_dev; - struct device_link *link; - unsigned int i; + struct dev_pm_domain_attach_data pd_data = { + .pd_names = (const char *[]) { "3d0", "3d1" }, + .num_pd_names = 2, + .pd_flags = PD_FLAG_REQUIRED_OPP, + }; int err; err = of_count_phandle_with_args(dev->of_node, "power-domains", @@ -414,29 +410,10 @@ static int gr3d_init_power(struct device *dev, struct gr3d *gr3d) if (dev->pm_domain) return 0; - err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs); - if (err) + err = devm_pm_domain_attach_list(dev, &pd_data, &gr3d->pd_list); + if (err < 0) return err; - for (i = 0; opp_genpd_names[i]; i++) { - pd_dev = opp_virt_devs[i]; - if (!pd_dev) { - dev_err(dev, "failed to get %s power domain\n", - opp_genpd_names[i]); - return -EINVAL; - } - - link = device_link_add(dev, pd_dev, link_flags); - if (!link) { - dev_err(dev, "failed to link to %s\n", dev_name(pd_dev)); - return -EINVAL; - } - - err = devm_add_action_or_reset(dev, gr3d_del_link, link); - if (err) - return err; - } - return 0; } @@ -628,5 +605,5 @@ struct platform_driver tegra_gr3d_driver = { .pm = &tegra_gr3d_pm, }, .probe = gr3d_probe, - .remove_new = gr3d_remove, + .remove = gr3d_remove, }; diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 6bf2dae82ca0..e705f8590c13 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -1919,5 +1919,5 @@ struct platform_driver 
tegra_hdmi_driver = { .of_match_table = tegra_hdmi_of_match, }, .probe = tegra_hdmi_probe, - .remove_new = tegra_hdmi_remove, + .remove = tegra_hdmi_remove, }; diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index e0c2019a591b..fa6140fc37fb 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -1218,5 +1218,5 @@ struct platform_driver tegra_display_hub_driver = { .of_match_table = tegra_display_hub_of_match, }, .probe = tegra_display_hub_probe, - .remove_new = tegra_display_hub_remove, + .remove = tegra_display_hub_remove, }; diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c index 4860790666af..2d9a0a3f6c38 100644 --- a/drivers/gpu/drm/tegra/nvdec.c +++ b/drivers/gpu/drm/tegra/nvdec.c @@ -566,7 +566,7 @@ struct platform_driver tegra_nvdec_driver = { .pm = &nvdec_pm_ops }, .probe = nvdec_probe, - .remove_new = nvdec_remove, + .remove = nvdec_remove, }; #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index bad3b8fcc726..802d2db7007a 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -4040,5 +4040,5 @@ struct platform_driver tegra_sor_driver = { .pm = &tegra_sor_pm_ops, }, .probe = tegra_sor_probe, - .remove_new = tegra_sor_remove, + .remove = tegra_sor_remove, }; diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c index 73c356f1c901..332c9b563d3f 100644 --- a/drivers/gpu/drm/tegra/vic.c +++ b/drivers/gpu/drm/tegra/vic.c @@ -553,7 +553,7 @@ struct platform_driver tegra_vic_driver = { .pm = &vic_pm_ops }, .probe = vic_probe, - .remove_new = vic_remove, + .remove = vic_remove, }; #if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c index 15e36a8db685..6bba97d0be88 100644 --- a/drivers/gpu/drm/tests/drm_connector_test.c +++ b/drivers/gpu/drm/tests/drm_connector_test.c @@ -996,7 +996,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb(struct kunit *test) unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1017,7 +1017,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc(struct kunit *test) unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1038,7 +1038,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc_vic_1(struct kunit *t unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); rate = drm_hdmi_compute_mode_clock(mode, 10, HDMI_COLORSPACE_RGB); @@ -1056,7 +1056,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc(struct kunit *test) unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1077,7 +1077,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc_vic_1(struct kunit *t unsigned 
long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); rate = drm_hdmi_compute_mode_clock(mode, 12, HDMI_COLORSPACE_RGB); @@ -1095,7 +1095,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_double(struct kunit *test) unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 6); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 6); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_TRUE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1118,7 +1118,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_valid(struct kunit unsigned long long rate; unsigned int vic = *(unsigned int *)test->param_value; - mode = drm_display_mode_from_cea_vic(drm, vic); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1155,7 +1155,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_10_bpc(struct kuni drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0]; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, vic); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1180,7 +1180,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_12_bpc(struct kuni drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0]; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, vic); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1203,7 +1203,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_8_bpc(struct kunit struct drm_device *drm = &priv->drm; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1225,7 +1225,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_10_bpc(struct kuni struct drm_device *drm = &priv->drm; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1247,7 +1247,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_12_bpc(struct kuni struct drm_device *drm = &priv->drm; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c index 34ee95d41f29..294773342e71 100644 --- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c +++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c @@ -441,7 +441,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test) ctx = drm_kunit_helper_acquire_ctx_alloc(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); drm = &priv->drm; @@ -555,7 
+555,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test) ctx = drm_kunit_helper_acquire_ctx_alloc(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); drm = &priv->drm; @@ -671,7 +671,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te ctx = drm_kunit_helper_acquire_ctx_alloc(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); drm = &priv->drm; @@ -1263,7 +1263,7 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test) ctx = drm_kunit_helper_acquire_ctx_alloc(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); /* diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c index aa62719dab0e..04a6b8cc62ac 100644 --- a/drivers/gpu/drm/tests/drm_kunit_helpers.c +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c @@ -3,6 +3,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_kunit_helpers.h> #include <drm/drm_managed.h> @@ -311,6 +312,47 @@ drm_kunit_helper_create_crtc(struct kunit *test, } EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc); +static void kunit_action_drm_mode_destroy(void *ptr) +{ + struct drm_display_mode *mode = ptr; + + drm_mode_destroy(NULL, mode); +} + +/** + * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC + for a KUnit test + * @test: The test context object + * @dev: DRM device + * @video_code: CEA VIC of the mode + * + * Creates a new mode matching the specified CEA VIC for a KUnit test. + * + * Resources will be cleaned up automatically. 
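 *
 * (Reviewer note: the automatic cleanup works by registering
 * kunit_action_drm_mode_destroy() through kunit_add_action_or_reset(),
 * as defined above, so tests no longer need to destroy the mode by hand.)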
+ * + * Returns: A new drm_display_mode on success or NULL on failure + */ +struct drm_display_mode * +drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev, + u8 video_code) +{ + struct drm_display_mode *mode; + int ret; + + mode = drm_display_mode_from_cea_vic(dev, video_code); + if (!mode) + return NULL; + + ret = kunit_add_action_or_reset(test, + kunit_action_drm_mode_destroy, + mode); + if (ret) + return NULL; + + return mode; +} +EXPORT_SYMBOL_GPL(drm_kunit_display_mode_from_cea_vic); + MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>"); MODULE_DESCRIPTION("KUnit test suite helper functions"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index 2428b9aaa003..7c8fd6407d82 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -252,7 +252,7 @@ MODULE_DEVICE_TABLE(of, tidss_of_table); static struct platform_driver tidss_platform_driver = { .probe = tidss_probe, - .remove_new = tidss_remove, + .remove = tidss_remove, .shutdown = tidss_shutdown, .driver = { .name = "tidss", diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 8c9f3705aa6c..6f0df8d6b90c 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -590,7 +590,7 @@ MODULE_DEVICE_TABLE(of, tilcdc_of_match); static struct platform_driver tilcdc_platform_driver = { .probe = tilcdc_pdev_probe, - .remove_new = tilcdc_pdev_remove, + .remove = tilcdc_pdev_remove, .shutdown = tilcdc_pdev_shutdown, .driver = { .name = "tilcdc", diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c index 5f2d1b6f9ee9..262f290d85d9 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c @@ -390,7 +390,7 @@ static const struct of_device_id panel_of_match[] = { static struct platform_driver panel_driver = { .probe = panel_probe, - .remove_new = panel_remove, + .remove = panel_remove, .driver = { .name = "tilcdc-panel", .of_match_table = panel_of_match, diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index f542aae334d5..94cbdb1337c0 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -198,6 +198,27 @@ config TINYDRM_REPAPER If M is selected the module will be called repaper. +config TINYDRM_SHARP_MEMORY + tristate "DRM support for Sharp Memory LCD panels" + depends on DRM && SPI + select DRM_CLIENT_SELECTION + select DRM_GEM_DMA_HELPER + select DRM_KMS_HELPER + help + DRM Driver for the following Sharp Memory Panels: + * 1.00" Sharp Memory LCD (LS010B7DH04) + * 1.10" Sharp Memory LCD (LS011B7DH03) + * 1.20" Sharp Memory LCD (LS012B7DD01) + * 1.28" Sharp Memory LCD (LS013B7DH03) + * 1.26" Sharp Memory LCD (LS013B7DH05) + * 1.80" Sharp Memory LCD (LS018B7DH02) + * 2.70" Sharp Memory LCD (LS027B7DH01) + * 2.70" Sharp Memory LCD (LS027B7DH01A) + * 3.20" Sharp Memory LCD (LS032B7DD02) + * 4.40" Sharp Memory LCD (LS044Q7DH01) + + If M is selected the module will be called sharp_memory. 
+ config TINYDRM_ST7586 tristate "DRM support for Sitronix ST7586 display panels" depends on DRM && SPI diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile index 76dde89a044b..4aaf56f8707d 100644 --- a/drivers/gpu/drm/tiny/Makefile +++ b/drivers/gpu/drm/tiny/Makefile @@ -14,5 +14,6 @@ obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o +obj-$(CONFIG_TINYDRM_SHARP_MEMORY) += sharp-memory.o obj-$(CONFIG_TINYDRM_ST7586) += st7586.o obj-$(CONFIG_TINYDRM_ST7735R) += st7735r.o diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c index 81abedec435d..0cc68042a6d6 100644 --- a/drivers/gpu/drm/tiny/arcpgu.c +++ b/drivers/gpu/drm/tiny/arcpgu.c @@ -423,7 +423,7 @@ MODULE_DEVICE_TABLE(of, arcpgu_of_table); static struct platform_driver arcpgu_platform_driver = { .probe = arcpgu_probe, - .remove_new = arcpgu_remove, + .remove = arcpgu_remove, .driver = { .name = "arcpgu", .of_match_table = arcpgu_of_table, diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c index 5d2d34976d4b..6f91ff1dbf7e 100644 --- a/drivers/gpu/drm/tiny/bochs.c +++ b/drivers/gpu/drm/tiny/bochs.c @@ -1,9 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/bug.h> +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> @@ -106,12 +107,17 @@ static struct bochs_device *to_bochs_device(const struct drm_device *dev) /* ---------------------------------------------------------------------- */ +static __always_inline bool bochs_uses_mmio(struct bochs_device *bochs) +{ + return !IS_ENABLED(CONFIG_HAS_IOPORT) || bochs->mmio; +} + static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val) { if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df)) return; - if (bochs->mmio) { + if (bochs_uses_mmio(bochs)) { int offset = ioport - 0x3c0 + 0x400; writeb(val, bochs->mmio + offset); @@ -125,7 +131,7 @@ static u8 bochs_vga_readb(struct bochs_device *bochs, u16 ioport) if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df)) return 0xff; - if (bochs->mmio) { + if (bochs_uses_mmio(bochs)) { int offset = ioport - 0x3c0 + 0x400; return readb(bochs->mmio + offset); @@ -138,7 +144,7 @@ static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg) { u16 ret = 0; - if (bochs->mmio) { + if (bochs_uses_mmio(bochs)) { int offset = 0x500 + (reg << 1); ret = readw(bochs->mmio + offset); @@ -151,7 +157,7 @@ static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg) static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val) { - if (bochs->mmio) { + if (bochs_uses_mmio(bochs)) { int offset = 0x500 + (reg << 1); writew(val, bochs->mmio + offset); @@ -236,7 +242,7 @@ static int bochs_hw_init(struct bochs_device *bochs) DRM_ERROR("Cannot map mmio region\n"); return -ENOMEM; } - } else { + } else if (IS_ENABLED(CONFIG_HAS_IOPORT)) { ioaddr = VBE_DISPI_IOPORT_INDEX; iosize = 2; if (!devm_request_region(&pdev->dev, ioaddr, iosize, "bochs-drm")) { @@ -244,6 +250,9 @@ static int bochs_hw_init(struct bochs_device *bochs) return -EBUSY; } bochs->ioports = 1; + } else { + dev_err(dev->dev, "I/O ports are not supported\n"); + return -EIO; } id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID); @@ -711,13 +720,13 @@ static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent struct 
drm_device *dev; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &bochs_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, bochs_driver.name); if (ret) return ret; bochs = devm_drm_dev_alloc(&pdev->dev, &bochs_driver, struct bochs_device, dev); if (IS_ERR(bochs)) - return PTR_ERR(dev); + return PTR_ERR(bochs); dev = &bochs->dev; ret = pcim_enable_device(pdev); diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index 20c4479ed4c3..4d2adcaeaa60 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -16,6 +16,7 @@ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com> */ +#include <linux/aperture.h> #include <linux/iosys-map.h> #include <linux/module.h> #include <linux/pci.h> @@ -23,7 +24,6 @@ #include <video/cirrus.h> #include <video/vga.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_state_helper.h> @@ -510,8 +510,10 @@ static void cirrus_crtc_helper_atomic_enable(struct drm_crtc *crtc, cirrus_mode_set(cirrus, &crtc_state->mode); +#ifdef CONFIG_HAS_IOPORT /* Unblank (needed on S3 resume, vgabios doesn't do it then) */ outb(VGA_AR_ENABLE_DISPLAY, VGA_ATT_W); +#endif drm_dev_exit(idx); } @@ -673,7 +675,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev, struct cirrus_device *cirrus; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &cirrus_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, cirrus_driver.name); if (ret) return ret; diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c index 04ffa93de44a..9898eab5e9e2 100644 --- a/drivers/gpu/drm/tiny/ofdrm.c +++ b/drivers/gpu/drm/tiny/ofdrm.c @@ -1,10 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/aperture.h> #include <linux/of_address.h> #include <linux/pci.h> #include <linux/platform_device.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_client_setup.h> @@ -1220,7 +1220,7 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv, fb_pgbase = round_down(fb_base, PAGE_SIZE); fb_pgsize = fb_base - fb_pgbase + round_up(fb_size, PAGE_SIZE); - ret = devm_aperture_acquire_from_firmware(dev, fb_pgbase, fb_pgsize); + ret = devm_aperture_acquire_for_platform_device(pdev, fb_pgbase, fb_pgsize); if (ret) { drm_err(dev, "could not acquire memory range %pr: error %d\n", &res, ret); return ERR_PTR(ret); @@ -1398,7 +1398,7 @@ static struct platform_driver ofdrm_platform_driver = { .of_match_table = ofdrm_of_match_display, }, .probe = ofdrm_probe, - .remove_new = ofdrm_remove, + .remove = ofdrm_remove, }; module_platform_driver(ofdrm_platform_driver); diff --git a/drivers/gpu/drm/tiny/sharp-memory.c b/drivers/gpu/drm/tiny/sharp-memory.c new file mode 100644 index 000000000000..2d2315bd6aef --- /dev/null +++ b/drivers/gpu/drm/tiny/sharp-memory.c @@ -0,0 +1,671 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> +#include <drm/drm_connector.h> +#include <drm/drm_damage_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_dma_helper.h> +#include <drm/drm_fbdev_dma.h> +#include <drm/drm_format_helper.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_gem_dma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> +#include <drm/drm_modes.h> +#include 
<drm/drm_probe_helper.h> +#include <drm/drm_rect.h> +#include <linux/bitrev.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/kthread.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pwm.h> +#include <linux/spi/spi.h> + +#define SHARP_MODE_PERIOD 8 +#define SHARP_ADDR_PERIOD 8 +#define SHARP_DUMMY_PERIOD 8 + +#define SHARP_MEMORY_DISPLAY_MAINTAIN_MODE 0 +#define SHARP_MEMORY_DISPLAY_UPDATE_MODE 1 +#define SHARP_MEMORY_DISPLAY_CLEAR_MODE 4 + +enum sharp_memory_model { + LS010B7DH04, + LS011B7DH03, + LS012B7DD01, + LS013B7DH03, + LS013B7DH05, + LS018B7DH02, + LS027B7DH01, + LS027B7DH01A, + LS032B7DD02, + LS044Q7DH01, +}; + +enum sharp_memory_vcom_mode { + SHARP_MEMORY_SOFTWARE_VCOM, + SHARP_MEMORY_EXTERNAL_VCOM, + SHARP_MEMORY_PWM_VCOM +}; + +struct sharp_memory_device { + struct drm_device drm; + struct spi_device *spi; + + const struct drm_display_mode *mode; + + struct drm_crtc crtc; + struct drm_plane plane; + struct drm_encoder encoder; + struct drm_connector connector; + + struct gpio_desc *enable_gpio; + + struct task_struct *sw_vcom_signal; + struct pwm_device *pwm_vcom_signal; + + enum sharp_memory_vcom_mode vcom_mode; + u8 vcom; + + u32 pitch; + u32 tx_buffer_size; + u8 *tx_buffer; + + /* When vcom_mode == "software" a kthread is used to periodically send a + * 'maintain display' message over spi. This mutex ensures tx_buffer access + * and spi bus usage is synchronized in this case. + */ + struct mutex tx_mutex; +}; + +static inline int sharp_memory_spi_write(struct spi_device *spi, void *buf, size_t len) +{ + /* Reverse the bit order */ + for (u8 *b = buf; b < ((u8 *)buf) + len; ++b) + *b = bitrev8(*b); + + return spi_write(spi, buf, len); +} + +static inline struct sharp_memory_device *drm_to_sharp_memory_device(struct drm_device *drm) +{ + return container_of(drm, struct sharp_memory_device, drm); +} + +DEFINE_DRM_GEM_DMA_FOPS(sharp_memory_fops); + +static const struct drm_driver sharp_memory_drm_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + .fops = &sharp_memory_fops, + DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, + .name = "sharp_memory_display", + .desc = "Sharp Display Memory LCD", + .date = "20231129", + .major = 1, + .minor = 0, +}; + +static inline void sharp_memory_set_tx_buffer_mode(u8 *buffer, u8 mode, u8 vcom) +{ + *buffer = mode | (vcom << 1); +} + +static inline void sharp_memory_set_tx_buffer_addresses(u8 *buffer, + struct drm_rect clip, + u32 pitch) +{ + for (u32 line = 0; line < clip.y2; ++line) + buffer[line * pitch] = line + 1; +} + +static void sharp_memory_set_tx_buffer_data(u8 *buffer, + struct drm_framebuffer *fb, + struct drm_rect clip, + u32 pitch, + struct drm_format_conv_state *fmtcnv_state) +{ + int ret; + struct iosys_map dst, vmap; + struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0); + + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); + if (ret) + return; + + iosys_map_set_vaddr(&dst, buffer); + iosys_map_set_vaddr(&vmap, dma_obj->vaddr); + + drm_fb_xrgb8888_to_mono(&dst, &pitch, &vmap, fb, &clip, fmtcnv_state); + + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); +} + +static int sharp_memory_update_display(struct sharp_memory_device *smd, + struct drm_framebuffer *fb, + struct drm_rect clip, + struct drm_format_conv_state *fmtcnv_state) +{ + int ret; + u32 pitch = smd->pitch; + u8 vcom = smd->vcom; + u8 *tx_buffer = smd->tx_buffer; + u32 tx_buffer_size = smd->tx_buffer_size; + + 
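	/*
	 * Annotation (not part of the patch): the buffer assembled below
	 * follows the Sharp memory-LCD serial format, roughly:
	 *
	 *   byte 0:    mode | (vcom << 1)
	 *   per line:  one 1-based line-address byte, then width/8 bytes
	 *              of 1 bpp pixel data, plus trailing dummy bits
	 *
	 * sharp_memory_spi_write() then bit-reverses every byte, since the
	 * panel clocks data in LSB-first while the SPI core sends MSB-first.
	 */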
mutex_lock(&smd->tx_mutex); + + /* Populate the transmit buffer with frame data */ + sharp_memory_set_tx_buffer_mode(&tx_buffer[0], + SHARP_MEMORY_DISPLAY_UPDATE_MODE, vcom); + sharp_memory_set_tx_buffer_addresses(&tx_buffer[1], clip, pitch); + sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, clip, pitch, fmtcnv_state); + + ret = sharp_memory_spi_write(smd->spi, tx_buffer, tx_buffer_size); + + mutex_unlock(&smd->tx_mutex); + + return ret; +} + +static int sharp_memory_maintain_display(struct sharp_memory_device *smd) +{ + int ret; + u8 vcom = smd->vcom; + u8 *tx_buffer = smd->tx_buffer; + + mutex_lock(&smd->tx_mutex); + + sharp_memory_set_tx_buffer_mode(&tx_buffer[0], SHARP_MEMORY_DISPLAY_MAINTAIN_MODE, vcom); + tx_buffer[1] = 0; /* Write dummy data */ + ret = sharp_memory_spi_write(smd->spi, tx_buffer, 2); + + mutex_unlock(&smd->tx_mutex); + + return ret; +} + +static int sharp_memory_clear_display(struct sharp_memory_device *smd) +{ + int ret; + u8 vcom = smd->vcom; + u8 *tx_buffer = smd->tx_buffer; + + mutex_lock(&smd->tx_mutex); + + sharp_memory_set_tx_buffer_mode(&tx_buffer[0], SHARP_MEMORY_DISPLAY_CLEAR_MODE, vcom); + tx_buffer[1] = 0; /* write dummy data */ + ret = sharp_memory_spi_write(smd->spi, tx_buffer, 2); + + mutex_unlock(&smd->tx_mutex); + + return ret; +} + +static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect, + struct drm_format_conv_state *fmtconv_state) +{ + struct drm_rect clip; + struct sharp_memory_device *smd = drm_to_sharp_memory_device(fb->dev); + + /* Always update a full line regardless of what is dirty */ + clip.x1 = 0; + clip.x2 = fb->width; + clip.y1 = rect->y1; + clip.y2 = rect->y2; + + sharp_memory_update_display(smd, fb, clip, fmtconv_state); +} + +static int sharp_memory_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); + struct sharp_memory_device *smd; + struct drm_crtc_state *crtc_state; + + smd = container_of(plane, struct sharp_memory_device, plane); + crtc_state = drm_atomic_get_new_crtc_state(state, &smd->crtc); + + return drm_atomic_helper_check_plane_state(plane_state, crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, false); +} + +static void sharp_memory_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_plane_state *plane_state = plane->state; + struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT; + struct sharp_memory_device *smd; + struct drm_rect rect; + + smd = container_of(plane, struct sharp_memory_device, plane); + if (!smd->crtc.state->active) + return; + + if (drm_atomic_helper_damage_merged(old_state, plane_state, &rect)) + sharp_memory_fb_dirty(plane_state->fb, &rect, &fmtcnv_state); + + drm_format_conv_state_release(&fmtcnv_state); +} + +static const struct drm_plane_helper_funcs sharp_memory_plane_helper_funcs = { + .prepare_fb = drm_gem_plane_helper_prepare_fb, + .atomic_check = sharp_memory_plane_atomic_check, + .atomic_update = sharp_memory_plane_atomic_update, +}; + +static bool sharp_memory_format_mod_supported(struct drm_plane *plane, + u32 format, + u64 modifier) +{ + return modifier == DRM_FORMAT_MOD_LINEAR; +} + +static const struct drm_plane_funcs sharp_memory_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + 
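	/*
	 * Annotation (not part of the patch): sharp_memory_fb_dirty() above
	 * widens every damage clip to full scanlines (x1 = 0, x2 = fb->width)
	 * because the panel protocol is line-addressed; a line can only be
	 * rewritten in its entirety.
	 */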
.reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .format_mod_supported = sharp_memory_format_mod_supported, +}; + +static enum drm_mode_status sharp_memory_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct sharp_memory_device *smd = drm_to_sharp_memory_device(crtc->dev); + + return drm_crtc_helper_mode_valid_fixed(crtc, mode, smd->mode); +} + +static int sharp_memory_crtc_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + int ret; + + if (!crtc_state->enable) + goto out; + + ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state); + if (ret) + return ret; + +out: + return drm_atomic_add_affected_planes(state, crtc); +} + +static int sharp_memory_sw_vcom_signal_thread(void *data) +{ + struct sharp_memory_device *smd = data; + + while (!kthread_should_stop()) { + smd->vcom ^= 1; /* Toggle vcom */ + sharp_memory_maintain_display(smd); + msleep(1000); + } + + return 0; +} + +static void sharp_memory_crtc_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct sharp_memory_device *smd = drm_to_sharp_memory_device(crtc->dev); + + sharp_memory_clear_display(smd); + + if (smd->enable_gpio) + gpiod_set_value(smd->enable_gpio, 1); +} + +static void sharp_memory_crtc_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct sharp_memory_device *smd = drm_to_sharp_memory_device(crtc->dev); + + sharp_memory_clear_display(smd); + + if (smd->enable_gpio) + gpiod_set_value(smd->enable_gpio, 0); +} + +static const struct drm_crtc_helper_funcs sharp_memory_crtc_helper_funcs = { + .mode_valid = sharp_memory_crtc_mode_valid, + .atomic_check = sharp_memory_crtc_check, + .atomic_enable = sharp_memory_crtc_enable, + .atomic_disable = sharp_memory_crtc_disable, +}; + +static const struct drm_crtc_funcs sharp_memory_crtc_funcs = { + .reset = drm_atomic_helper_crtc_reset, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static const struct drm_encoder_funcs sharp_memory_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static int sharp_memory_connector_get_modes(struct drm_connector *connector) +{ + struct sharp_memory_device *smd = drm_to_sharp_memory_device(connector->dev); + + return drm_connector_helper_get_modes_fixed(connector, smd->mode); +} + +static const struct drm_connector_helper_funcs sharp_memory_connector_hfuncs = { + .get_modes = sharp_memory_connector_get_modes, +}; + +static const struct drm_connector_funcs sharp_memory_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + +}; + +static const struct drm_mode_config_funcs sharp_memory_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static const struct drm_display_mode sharp_memory_ls010b7dh04_mode = { + DRM_SIMPLE_MODE(128, 128, 18, 18), +}; + +static const struct 
drm_display_mode sharp_memory_ls011b7dh03_mode = { + DRM_SIMPLE_MODE(160, 68, 25, 10), +}; + +static const struct drm_display_mode sharp_memory_ls012b7dd01_mode = { + DRM_SIMPLE_MODE(184, 38, 29, 6), +}; + +static const struct drm_display_mode sharp_memory_ls013b7dh03_mode = { + DRM_SIMPLE_MODE(128, 128, 23, 23), +}; + +static const struct drm_display_mode sharp_memory_ls013b7dh05_mode = { + DRM_SIMPLE_MODE(144, 168, 20, 24), +}; + +static const struct drm_display_mode sharp_memory_ls018b7dh02_mode = { + DRM_SIMPLE_MODE(230, 303, 27, 36), +}; + +static const struct drm_display_mode sharp_memory_ls027b7dh01_mode = { + DRM_SIMPLE_MODE(400, 240, 58, 35), +}; + +static const struct drm_display_mode sharp_memory_ls032b7dd02_mode = { + DRM_SIMPLE_MODE(336, 536, 42, 68), +}; + +static const struct drm_display_mode sharp_memory_ls044q7dh01_mode = { + DRM_SIMPLE_MODE(320, 240, 89, 67), +}; + +static const struct spi_device_id sharp_memory_ids[] = { + {"ls010b7dh04", (kernel_ulong_t)&sharp_memory_ls010b7dh04_mode}, + {"ls011b7dh03", (kernel_ulong_t)&sharp_memory_ls011b7dh03_mode}, + {"ls012b7dd01", (kernel_ulong_t)&sharp_memory_ls012b7dd01_mode}, + {"ls013b7dh03", (kernel_ulong_t)&sharp_memory_ls013b7dh03_mode}, + {"ls013b7dh05", (kernel_ulong_t)&sharp_memory_ls013b7dh05_mode}, + {"ls018b7dh02", (kernel_ulong_t)&sharp_memory_ls018b7dh02_mode}, + {"ls027b7dh01", (kernel_ulong_t)&sharp_memory_ls027b7dh01_mode}, + {"ls027b7dh01a", (kernel_ulong_t)&sharp_memory_ls027b7dh01_mode}, + {"ls032b7dd02", (kernel_ulong_t)&sharp_memory_ls032b7dd02_mode}, + {"ls044q7dh01", (kernel_ulong_t)&sharp_memory_ls044q7dh01_mode}, + {}, +}; +MODULE_DEVICE_TABLE(spi, sharp_memory_ids); + +static const struct of_device_id sharp_memory_of_match[] = { + {.compatible = "sharp,ls010b7dh04", .data = &sharp_memory_ls010b7dh04_mode}, + {.compatible = "sharp,ls011b7dh03", .data = &sharp_memory_ls011b7dh03_mode}, + {.compatible = "sharp,ls012b7dd01", .data = &sharp_memory_ls012b7dd01_mode}, + {.compatible = "sharp,ls013b7dh03", .data = &sharp_memory_ls013b7dh03_mode}, + {.compatible = "sharp,ls013b7dh05", .data = &sharp_memory_ls013b7dh05_mode}, + {.compatible = "sharp,ls018b7dh02", .data = &sharp_memory_ls018b7dh02_mode}, + {.compatible = "sharp,ls027b7dh01", .data = &sharp_memory_ls027b7dh01_mode}, + {.compatible = "sharp,ls027b7dh01a", .data = &sharp_memory_ls027b7dh01_mode}, + {.compatible = "sharp,ls032b7dd02", .data = &sharp_memory_ls032b7dd02_mode}, + {.compatible = "sharp,ls044q7dh01", .data = &sharp_memory_ls044q7dh01_mode}, + {}, +}; +MODULE_DEVICE_TABLE(of, sharp_memory_of_match); + +static const u32 sharp_memory_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +static int sharp_memory_pipe_init(struct drm_device *dev, + struct sharp_memory_device *smd, + const u32 *formats, unsigned int format_count, + const u64 *format_modifiers) +{ + int ret; + struct drm_encoder *encoder = &smd->encoder; + struct drm_plane *plane = &smd->plane; + struct drm_crtc *crtc = &smd->crtc; + struct drm_connector *connector = &smd->connector; + + drm_plane_helper_add(plane, &sharp_memory_plane_helper_funcs); + ret = drm_universal_plane_init(dev, plane, 0, + &sharp_memory_plane_funcs, + formats, format_count, + format_modifiers, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) + return ret; + + drm_crtc_helper_add(crtc, &sharp_memory_crtc_helper_funcs); + ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL, + &sharp_memory_crtc_funcs, NULL); + if (ret) + return ret; + + encoder->possible_crtcs = drm_crtc_mask(crtc); + ret = drm_encoder_init(dev, encoder, &sharp_memory_encoder_funcs, + DRM_MODE_ENCODER_NONE, NULL); + if (ret) + 
return ret; + + ret = drm_connector_init(&smd->drm, &smd->connector, + &sharp_memory_connector_funcs, + DRM_MODE_CONNECTOR_SPI); + if (ret) + return ret; + + drm_connector_helper_add(&smd->connector, + &sharp_memory_connector_hfuncs); + + return drm_connector_attach_encoder(connector, encoder); +} + +static int sharp_memory_init_pwm_vcom_signal(struct sharp_memory_device *smd) +{ + int ret; + struct device *dev = &smd->spi->dev; + struct pwm_state pwm_state; + + smd->pwm_vcom_signal = devm_pwm_get(dev, NULL); + if (IS_ERR(smd->pwm_vcom_signal)) + return dev_err_probe(dev, PTR_ERR(smd->pwm_vcom_signal), + "Could not get pwm device\n"); + + pwm_init_state(smd->pwm_vcom_signal, &pwm_state); + pwm_set_relative_duty_cycle(&pwm_state, 1, 10); + pwm_state.enabled = true; + ret = pwm_apply_might_sleep(smd->pwm_vcom_signal, &pwm_state); + if (ret) + return dev_err_probe(dev, ret, "Could not apply pwm state\n"); + + return 0; +} + +static int sharp_memory_probe(struct spi_device *spi) +{ + int ret; + struct device *dev; + struct sharp_memory_device *smd; + struct drm_device *drm; + const char *vcom_mode_str; + + dev = &spi->dev; + + ret = spi_setup(spi); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to setup spi device\n"); + + if (!dev->coherent_dma_mask) { + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) + return dev_err_probe(dev, ret, "Failed to set dma mask\n"); + } + + smd = devm_drm_dev_alloc(dev, &sharp_memory_drm_driver, + struct sharp_memory_device, drm); + if (IS_ERR(smd)) + return PTR_ERR(smd); + + spi_set_drvdata(spi, smd); + + smd->spi = spi; + drm = &smd->drm; + ret = drmm_mode_config_init(drm); + if (ret) + return dev_err_probe(dev, ret, "Failed to initialize drm config\n"); + + smd->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH); + if (IS_ERR(smd->enable_gpio)) + return dev_err_probe(dev, PTR_ERR(smd->enable_gpio), + "Failed to get enable gpio\n"); + if (!smd->enable_gpio) + dev_warn(dev, "Enable gpio not defined\n"); + + drm->mode_config.funcs = &sharp_memory_mode_config_funcs; + smd->mode = spi_get_device_match_data(spi); + + smd->pitch = (SHARP_ADDR_PERIOD + smd->mode->hdisplay + SHARP_DUMMY_PERIOD) / 8; + smd->tx_buffer_size = (SHARP_MODE_PERIOD + + (SHARP_ADDR_PERIOD + (smd->mode->hdisplay) + SHARP_DUMMY_PERIOD) * + smd->mode->vdisplay) / 8; + + smd->tx_buffer = devm_kzalloc(dev, smd->tx_buffer_size, GFP_KERNEL); + if (!smd->tx_buffer) + return -ENOMEM; + + mutex_init(&smd->tx_mutex); + + /* + * VCOM is a signal that prevents DC bias from being built up in + * the panel, resulting in pixels being forever stuck in one state.
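+ * + * The polarity must keep alternating for as long as the panel is + * powered; in software mode the kthread above toggles it roughly once + * per second (see sharp_memory_sw_vcom_signal_thread()).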
+ * + * This driver supports three different methods to generate this + * signal depending on the EXTMODE pin: + * + * software (EXTMODE = L) - This mode uses a kthread to + * periodically send a "maintain display" message to the display, + * toggling the vcom bit on and off with each message + * + * external (EXTMODE = H) - This mode relies on an external + * clock to generate the signal on the EXTCOMM pin + * + * pwm (EXTMODE = H) - This mode uses a pwm device to generate + * the signal on the EXTCOMM pin + */ + if (device_property_read_string(dev, "sharp,vcom-mode", &vcom_mode_str)) + return dev_err_probe(dev, -EINVAL, + "Unable to find sharp,vcom-mode property in device tree\n"); + + if (!strcmp("software", vcom_mode_str)) { + smd->vcom_mode = SHARP_MEMORY_SOFTWARE_VCOM; + smd->sw_vcom_signal = kthread_run(sharp_memory_sw_vcom_signal_thread, + smd, "sw_vcom_signal"); + if (IS_ERR(smd->sw_vcom_signal)) + return dev_err_probe(dev, PTR_ERR(smd->sw_vcom_signal), + "Failed to start vcom signal thread\n"); + + } else if (!strcmp("external", vcom_mode_str)) { + smd->vcom_mode = SHARP_MEMORY_EXTERNAL_VCOM; + + } else if (!strcmp("pwm", vcom_mode_str)) { + smd->vcom_mode = SHARP_MEMORY_PWM_VCOM; + ret = sharp_memory_init_pwm_vcom_signal(smd); + if (ret) + return ret; + } else { + return dev_err_probe(dev, -EINVAL, "Invalid value set for vcom-mode\n"); + } + + drm->mode_config.min_width = smd->mode->hdisplay; + drm->mode_config.max_width = smd->mode->hdisplay; + drm->mode_config.min_height = smd->mode->vdisplay; + drm->mode_config.max_height = smd->mode->vdisplay; + + ret = sharp_memory_pipe_init(drm, smd, sharp_memory_formats, + ARRAY_SIZE(sharp_memory_formats), + NULL); + if (ret) + return dev_err_probe(dev, ret, "Failed to initialize display pipeline.\n"); + + drm_plane_enable_fb_damage_clips(&smd->plane); + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + return dev_err_probe(dev, ret, "Failed to register drm device.\n"); + + drm_client_setup(drm, NULL); + + return 0; +} + +static void sharp_memory_remove(struct spi_device *spi) +{ + struct sharp_memory_device *smd = spi_get_drvdata(spi); + + drm_dev_unplug(&smd->drm); + drm_atomic_helper_shutdown(&smd->drm); + + switch (smd->vcom_mode) { + case SHARP_MEMORY_SOFTWARE_VCOM: + kthread_stop(smd->sw_vcom_signal); + break; + + case SHARP_MEMORY_EXTERNAL_VCOM: + break; + + case SHARP_MEMORY_PWM_VCOM: + pwm_disable(smd->pwm_vcom_signal); + break; + } +} + +static struct spi_driver sharp_memory_spi_driver = { + .driver = { + .name = "sharp_memory", + .of_match_table = sharp_memory_of_match, + }, + .probe = sharp_memory_probe, + .remove = sharp_memory_remove, + .id_table = sharp_memory_ids, +}; +module_spi_driver(sharp_memory_spi_driver); + +MODULE_AUTHOR("Alex Lanzano <lanzano.alex@gmail.com>"); +MODULE_DESCRIPTION("SPI protocol driver for Sharp Memory LCD displays"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index 484901e5e2f4..4d4f05dee244 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/aperture.h> #include <linux/clk.h> #include <linux/of_clk.h> #include <linux/minmax.h> @@ -9,7 +10,6 @@ #include <linux/pm_domain.h> #include <linux/regulator/consumer.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_client_setup.h> @@ -883,7 +883,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, if (mem) { void *screen_base; - ret = devm_aperture_acquire_from_firmware(dev, mem->start, 
resource_size(mem)); + ret = devm_aperture_acquire_for_platform_device(pdev, mem->start, + resource_size(mem)); if (ret) { drm_err(dev, "could not acquire memory range %pr: %d\n", mem, ret); return ERR_PTR(ret); @@ -903,7 +904,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, if (!res) return ERR_PTR(-EINVAL); - ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res)); + ret = devm_aperture_acquire_for_platform_device(pdev, res->start, + resource_size(res)); if (ret) { drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret); return ERR_PTR(ret); @@ -1064,7 +1066,7 @@ static struct platform_driver simpledrm_platform_driver = { .of_match_table = simpledrm_of_match_table, }, .probe = simpledrm_probe, - .remove_new = simpledrm_remove, + .remove = simpledrm_remove, }; module_platform_driver(simpledrm_platform_driver); diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c index b30340a2141d..c341aee37dd9 100644 --- a/drivers/gpu/drm/tve200/tve200_drv.c +++ b/drivers/gpu/drm/tve200/tve200_drv.c @@ -267,7 +267,7 @@ static struct platform_driver tve200_driver = { .of_match_table = tve200_of_match, }, .probe = tve200_probe, - .remove_new = tve200_remove, + .remove = tve200_remove, .shutdown = tve200_shutdown, }; drm_module_platform_driver(tve200_driver); diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index 7055f7c7bcfe..73ab7dd31b17 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -157,13 +157,8 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, struct v3d_bo *bo; int ret; - /* Let the user opt out of allocating the BOs with THP */ - if (v3d->gemfs) - shmem_obj = drm_gem_shmem_create_with_mnt(dev, unaligned_size, - v3d->gemfs); - else - shmem_obj = drm_gem_shmem_create(dev, unaligned_size); - + shmem_obj = drm_gem_shmem_create_with_mnt(dev, unaligned_size, + v3d->gemfs); if (IS_ERR(shmem_obj)) return ERR_CAST(shmem_obj); bo = to_v3d_bo(&shmem_obj->base); diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index fb35c5c3f1a7..bee51c942a56 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -391,7 +391,7 @@ static void v3d_platform_drm_remove(struct platform_device *pdev) static struct platform_driver v3d_platform_driver = { .probe = v3d_platform_drm_probe, - .remove_new = v3d_platform_drm_remove, + .remove = v3d_platform_drm_remove, .driver = { .name = "v3d", .of_match_table = v3d_of_match, diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c index 54a486a9b74c..924814cab46a 100644 --- a/drivers/gpu/drm/v3d/v3d_perfmon.c +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -254,9 +254,9 @@ void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon) V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel); } + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask); V3D_CORE_WRITE(0, V3D_V4_PCTR_0_CLR, mask); V3D_CORE_WRITE(0, V3D_PCTR_0_OVERFLOW, mask); - V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask); v3d->active_perfmon = perfmon; } @@ -306,6 +306,11 @@ void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv) static int v3d_perfmon_idr_del(int id, void *elem, void *data) { struct v3d_perfmon *perfmon = elem; + struct v3d_dev *v3d = (struct v3d_dev *)data; + + /* If the active perfmon is being destroyed, stop it first */ + if (perfmon == v3d->active_perfmon) + v3d_perfmon_stop(v3d, perfmon, false); v3d_perfmon_put(perfmon); @@ -314,8 +319,10 @@ 
static int v3d_perfmon_idr_del(int id, void *elem, void *data) void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv) { + struct v3d_dev *v3d = v3d_priv->v3d; + mutex_lock(&v3d_priv->perfmon.lock); - idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL); + idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, v3d); idr_destroy(&v3d_priv->perfmon.idr); mutex_unlock(&v3d_priv->perfmon.lock); mutex_destroy(&v3d_priv->perfmon.lock); diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index 49dc337b3e01..a536c467e2b2 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -7,11 +7,12 @@ * Michael Thayer <michael.thayer@oracle.com, * Hans de Goede <hdegoede@redhat.com> */ + +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/vt_kern.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> @@ -45,7 +46,7 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!vbox_check_supported(VBE_DISPI_ID_HGSMI)) return -ENODEV; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); + ret = aperture_remove_conflicting_pci_devices(pdev, driver.name); if (ret) return ret; diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 575900ee67a5..ee82a959d279 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -1465,7 +1465,7 @@ static void vc4_crtc_dev_remove(struct platform_device *pdev) struct platform_driver vc4_crtc_driver = { .probe = vc4_crtc_dev_probe, - .remove_new = vc4_crtc_dev_remove, + .remove = vc4_crtc_dev_remove, .driver = { .name = "vc4_crtc", .of_match_table = vc4_crtc_dt_match, diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c index a382dc4654bd..960550c166d9 100644 --- a/drivers/gpu/drm/vc4/vc4_dpi.c +++ b/drivers/gpu/drm/vc4/vc4_dpi.c @@ -395,7 +395,7 @@ static void vc4_dpi_dev_remove(struct platform_device *pdev) struct platform_driver vc4_dpi_driver = { .probe = vc4_dpi_dev_probe, - .remove_new = vc4_dpi_dev_remove, + .remove = vc4_dpi_dev_remove, .driver = { .name = "vc4_dpi", .of_match_table = vc4_dpi_dt_match, diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index a238f76a6073..2c60d37275b0 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -20,6 +20,7 @@ * driver. 
*/ +#include <linux/aperture.h> #include <linux/clk.h> #include <linux/component.h> #include <linux/device.h> @@ -30,7 +31,6 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> @@ -360,7 +360,7 @@ static int vc4_drm_bind(struct device *dev) } } - ret = drm_aperture_remove_framebuffers(driver); + ret = aperture_remove_all_conflicting_devices(driver->name); if (ret) goto err; @@ -471,7 +471,7 @@ MODULE_DEVICE_TABLE(of, vc4_of_match); static struct platform_driver vc4_platform_driver = { .probe = vc4_platform_drm_probe, - .remove_new = vc4_platform_drm_remove, + .remove = vc4_platform_drm_remove, .shutdown = vc4_platform_drm_shutdown, .driver = { .name = "vc4-drm", diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index f5ccc1bf7a63..5eb293bdb363 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -1841,7 +1841,7 @@ static void vc4_dsi_dev_remove(struct platform_device *pdev) struct platform_driver vc4_dsi_driver = { .probe = vc4_dsi_dev_probe, - .remove_new = vc4_dsi_dev_remove, + .remove = vc4_dsi_dev_remove, .driver = { .name = "vc4_dsi", .of_match_table = vc4_dsi_dt_match, diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 62b82b1eeb36..e3818c48c9b8 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -3422,7 +3422,7 @@ static const struct dev_pm_ops vc4_hdmi_pm_ops = { struct platform_driver vc4_hdmi_driver = { .probe = vc4_hdmi_dev_probe, - .remove_new = vc4_hdmi_dev_remove, + .remove = vc4_hdmi_dev_remove, .driver = { .name = "vc4_hdmi", .of_match_table = vc4_hdmi_dt_match, diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 1edf6e3fa7e6..70623e6b91e9 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -1153,7 +1153,7 @@ static const struct of_device_id vc4_hvs_dt_match[] = { struct platform_driver vc4_hvs_driver = { .probe = vc4_hvs_dev_probe, - .remove_new = vc4_hvs_dev_remove, + .remove = vc4_hvs_dev_remove, .driver = { .name = "vc4_hvs", .of_match_table = vc4_hvs_dt_match, diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c index f2e56d0f6298..f1342f917cf7 100644 --- a/drivers/gpu/drm/vc4/vc4_perfmon.c +++ b/drivers/gpu/drm/vc4/vc4_perfmon.c @@ -116,6 +116,11 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file) static int vc4_perfmon_idr_del(int id, void *elem, void *data) { struct vc4_perfmon *perfmon = elem; + struct vc4_dev *vc4 = (struct vc4_dev *)data; + + /* If the active perfmon is being destroyed, stop it first */ + if (perfmon == vc4->active_perfmon) + vc4_perfmon_stop(vc4, perfmon, false); vc4_perfmon_put(perfmon); @@ -130,7 +135,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file) return; mutex_lock(&vc4file->perfmon.lock); - idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL); + idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, vc4); idr_destroy(&vc4file->perfmon.idr); mutex_unlock(&vc4file->perfmon.lock); mutex_destroy(&vc4file->perfmon.lock); diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index ffe1f7d1b911..3e38a1d2d55e 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -585,7 +585,7 @@ static const struct of_device_id vc4_txp_dt_match[] = { struct platform_driver vc4_txp_driver = { .probe = vc4_txp_probe, - .remove_new = vc4_txp_remove, + 
.remove = vc4_txp_remove, .driver = { .name = "vc4_txp", .of_match_table = vc4_txp_dt_match, diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c index 2423826c89eb..bb09df5000bd 100644 --- a/drivers/gpu/drm/vc4/vc4_v3d.c +++ b/drivers/gpu/drm/vc4/vc4_v3d.c @@ -534,7 +534,7 @@ const struct of_device_id vc4_v3d_dt_match[] = { struct platform_driver vc4_v3d_driver = { .probe = vc4_v3d_dev_probe, - .remove_new = vc4_v3d_dev_remove, + .remove = vc4_v3d_dev_remove, .driver = { .name = "vc4_v3d", .of_match_table = vc4_v3d_dt_match, diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c index eb64e881051e..06d702e879b0 100644 --- a/drivers/gpu/drm/vc4/vc4_vec.c +++ b/drivers/gpu/drm/vc4/vc4_vec.c @@ -848,7 +848,7 @@ static void vc4_vec_dev_remove(struct platform_device *pdev) struct platform_driver vc4_vec_driver = { .probe = vc4_vec_dev_probe, - .remove_new = vc4_vec_dev_remove, + .remove = vc4_vec_dev_remove, .driver = { .name = "vc4_vec", .of_match_table = vc4_vec_dt_match, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 1e08d91a4d17..ffca6e2e1c9a 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -26,13 +26,13 @@ * OTHER DEALINGS IN THE SOFTWARE. */ +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/poll.h> #include <linux/wait.h> #include <drm/drm.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> @@ -59,7 +59,7 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev) vga ? "virtio-vga" : "virtio-gpu-pci", pname); if (vga) { - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); + ret = aperture_remove_conflicting_pci_devices(pdev, driver.name); if (ret) return ret; } diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c index 57a5769fc994..3f0977d746be 100644 --- a/drivers/gpu/drm/vkms/vkms_composer.c +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -187,7 +187,7 @@ static void blend(struct vkms_writeback_job *wb, const struct pixel_argb_u16 background_color = { .a = 0xffff }; - size_t crtc_y_limit = crtc_state->base.crtc->mode.vdisplay; + size_t crtc_y_limit = crtc_state->base.mode.vdisplay; /* * The planes are composed line-by-line to avoid heavy memory usage. 
It is a necessary @@ -270,7 +270,7 @@ static int compose_active_planes(struct vkms_writeback_job *active_wb, if (WARN_ON(check_format_funcs(crtc_state, active_wb))) return -EINVAL; - line_width = crtc_state->base.crtc->mode.hdisplay; + line_width = crtc_state->base.mode.hdisplay; stage_buffer.n_pixels = line_width; output_buffer.n_pixels = line_width; diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index a40295c18b48..bbf080d32d2c 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -64,8 +64,6 @@ static int vkms_enable_vblank(struct drm_crtc *crtc) struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); struct vkms_output *out = drm_crtc_to_vkms_output(crtc); - drm_calc_timestamping_constants(crtc, &crtc->mode); - hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); out->vblank_hrtimer.function = &vkms_vblank_simulate; out->period_ns = ktime_set(0, vblank->framedur_ns); diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 0a5a185aa0b0..25a99fde126c 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -92,7 +92,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index) DRM_MODE_CONNECTOR_VIRTUAL); if (ret) { DRM_ERROR("Failed to init connector\n"); - goto err_connector; + return ret; } drm_connector_helper_add(connector, &vkms_conn_helper_funcs); @@ -131,8 +131,5 @@ err_attach: err_encoder: drm_connector_cleanup(connector); -err_connector: - drm_crtc_cleanup(crtc); - return ret; } diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c index bc724cbd5e3a..999d5c01ea81 100644 --- a/drivers/gpu/drm/vkms/vkms_writeback.c +++ b/drivers/gpu/drm/vkms/vkms_writeback.c @@ -131,8 +131,8 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn, struct drm_connector_state *conn_state = wb_conn->base.state; struct vkms_crtc_state *crtc_state = output->composer_state; struct drm_framebuffer *fb = connector_state->writeback_job->fb; - u16 crtc_height = crtc_state->base.crtc->mode.vdisplay; - u16 crtc_width = crtc_state->base.crtc->mode.hdisplay; + u16 crtc_height = crtc_state->base.mode.vdisplay; + u16 crtc_width = crtc_state->base.mode.hdisplay; struct vkms_writeback_job *active_wb; struct vkms_frame_info *wb_frame_info; u32 wb_format = fb->format->format; diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c index 3353e97687d1..36d46b79562a 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.c +++ b/drivers/gpu/drm/vmwgfx/ttm_object.c @@ -54,7 +54,7 @@ #include <linux/module.h> #include <linux/hashtable.h> -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); #define VMW_TTM_OBJECT_REF_HT_ORDER 10 @@ -471,7 +471,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) */ static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf) { - return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L; + return file_ref_get(&dmabuf->file->f_ref); } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c index 890a66a2361f..64bd7d74854e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c @@ -635,10 +635,8 @@ out: kunmap_atomic(d.src_addr); if (d.dst_addr) kunmap_atomic(d.dst_addr); - if (src_pages) - kvfree(src_pages); - if (dst_pages) - kvfree(dst_pages); + kvfree(src_pages); + kvfree(dst_pages); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 
b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index cf7a89d002e4..2c46897876dd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -35,7 +35,6 @@ #include "vmwgfx_vkms.h" #include "ttm_object.h" -#include <drm/drm_aperture.h> #include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> @@ -50,6 +49,8 @@ #ifdef CONFIG_X86 #include <asm/hypervisor.h> #endif + +#include <linux/aperture.h> #include <linux/cc_platform.h> #include <linux/dma-mapping.h> #include <linux/module.h> @@ -860,8 +861,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) bool refuse_dma = false; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); - dev_priv->drm.dev_private = dev_priv; - vmw_sw_context_init(dev_priv); mutex_init(&dev_priv->cmdbuf_mutex); @@ -1656,7 +1655,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct vmw_private *vmw; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); + ret = aperture_remove_conflicting_pci_devices(pdev, driver.name); if (ret) goto out_error; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 3f4719b3c268..b21831ef214a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -62,7 +62,7 @@ #define VMWGFX_DRIVER_MINOR 20 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) -#define VMWGFX_MAX_DISPLAYS 16 +#define VMWGFX_NUM_DISPLAY_UNITS 8 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 #define VMWGFX_MIN_INITIAL_WIDTH 1280 @@ -82,7 +82,7 @@ #define VMWGFX_NUM_GB_CONTEXT 256 #define VMWGFX_NUM_GB_SHADER 20000 #define VMWGFX_NUM_GB_SURFACE 32768 -#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS +#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_NUM_DISPLAY_UNITS #define VMWGFX_NUM_DXCONTEXT 256 #define VMWGFX_NUM_DXQUERY 512 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ @@ -639,7 +639,7 @@ static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) static inline struct vmw_private *vmw_priv(struct drm_device *dev) { - return (struct vmw_private *)dev->dev_private; + return container_of(dev, struct vmw_private, drm); } static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 288ed0bb75cb..8db38927729b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -280,7 +280,7 @@ static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp, static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp, struct vmw_plane_state *vps) { - struct vmw_private *dev_priv = vcp->base.dev->dev_private; + struct vmw_private *dev_priv = vmw_priv(vcp->base.dev); u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h); u32 i; u32 cursor_max_dim, mob_max_size; @@ -519,7 +519,7 @@ void vmw_du_cursor_plane_destroy(struct drm_plane *plane) struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); u32 i; - vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0); + vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0); for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]); @@ -1265,6 +1265,8 @@ static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb, struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb); struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo); + if (WARN_ON(!bo)) + return 
-EINVAL; return drm_gem_handle_create(file_priv, &bo->tbo.base, handle); } @@ -1283,7 +1285,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, { struct drm_device *dev = &dev_priv->drm; struct vmw_framebuffer_surface *vfbs; - enum SVGA3dSurfaceFormat format; struct vmw_surface *surface; int ret; @@ -1320,34 +1321,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, return -EINVAL; } - switch (mode_cmd->pixel_format) { - case DRM_FORMAT_ARGB8888: - format = SVGA3D_A8R8G8B8; - break; - case DRM_FORMAT_XRGB8888: - format = SVGA3D_X8R8G8B8; - break; - case DRM_FORMAT_RGB565: - format = SVGA3D_R5G6B5; - break; - case DRM_FORMAT_XRGB1555: - format = SVGA3D_A1R5G5B5; - break; - default: - DRM_ERROR("Invalid pixel format: %p4cc\n", - &mode_cmd->pixel_format); - return -EINVAL; - } - - /* - * For DX, surface format validation is done when surface->scanout - * is set. - */ - if (!has_sm4_context(dev_priv) && format != surface->metadata.format) { - DRM_ERROR("Invalid surface format for requested mode.\n"); - return -EINVAL; - } - vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL); if (!vfbs) { ret = -ENOMEM; @@ -1539,6 +1512,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, DRM_ERROR("Surface size cannot exceed %dx%d\n", dev_priv->texture_max_width, dev_priv->texture_max_height); + ret = -EINVAL; goto err_out; } @@ -2225,7 +2199,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_mode_config *mode_config = &dev->mode_config; struct drm_vmw_update_layout_arg *arg = (struct drm_vmw_update_layout_arg *)data; - void __user *user_rects; + const void __user *user_rects; struct drm_vmw_rect *rects; struct drm_rect *drm_rects; unsigned rects_size; @@ -2237,6 +2211,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, VMWGFX_MIN_INITIAL_HEIGHT}; vmw_du_update_layout(dev_priv, 1, &def_rect); return 0; + } else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) { + return -E2BIG; } rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 6141fadf81ef..2a6c6d6581e0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -199,9 +199,6 @@ struct vmw_kms_dirty { s32 unit_y2; }; -#define VMWGFX_NUM_DISPLAY_UNITS 8 - - #define vmw_framebuffer_to_vfb(x) \ container_of(x, struct vmw_framebuffer, base) #define vmw_framebuffer_to_vfbs(x) \ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index fab155a68054..82d18b88f4a7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -886,6 +886,10 @@ static int vmw_stdu_connector_atomic_check(struct drm_connector *conn, struct drm_crtc_state *new_crtc_state; conn_state = drm_atomic_get_connector_state(state, conn); + + if (IS_ERR(conn_state)) + return PTR_ERR(conn_state); + du = vmw_connector_to_stdu(conn); if (!conn_state->crtc) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 1625b30d9970..5721c74da3e0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -2276,9 +2276,12 @@ int vmw_dumb_create(struct drm_file *file_priv, const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format); SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE | SVGA3D_SURFACE_HINT_RENDERTARGET | - SVGA3D_SURFACE_SCREENTARGET | - 
SVGA3D_SURFACE_BIND_SHADER_RESOURCE | - SVGA3D_SURFACE_BIND_RENDER_TARGET; + SVGA3D_SURFACE_SCREENTARGET; + + if (vmw_surface_is_dx_screen_target_format(format)) { + flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE | + SVGA3D_SURFACE_BIND_RENDER_TARGET; + } /* * Without mob support we're just going to use raw memory buffer diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index ebd0879e04d4..b51a2bde73e2 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -8,6 +8,7 @@ config DRM_XE select SHMEM select TMPFS select DRM_BUDDY + select DRM_CLIENT_SELECTION select DRM_EXEC select DRM_KMS_HELPER select DRM_KUNIT_TEST_HELPERS if DRM_XE_KUNIT_TEST != n @@ -50,7 +51,7 @@ config DRM_XE config DRM_XE_DISPLAY bool "Enable display support" - depends on DRM_XE && DRM_XE=m + depends on DRM_XE && DRM_XE=m && HAS_IOPORT select FB_IOMEM_HELPERS select I2C select I2C_ALGOBIT diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h index 6b30743a2f6c..37606cf8cc5e 100644 --- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h @@ -352,6 +352,7 @@ enum xe_guc_klv_ids { GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE = 0x9007, GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008, GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009, + GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO = 0x900a, }; #endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h index cb6c7598824b..9c4cf050059a 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h @@ -29,7 +29,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe, bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe), NULL, size, start, end, - ttm_bo_type_kernel, flags); + ttm_bo_type_kernel, flags, 0); if (IS_ERR(bo)) { err = PTR_ERR(bo); bo = NULL; diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index b8bfb666ebe8..317fa66adf18 100644 --- a/drivers/gpu/drm/xe/display/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -356,6 +356,9 @@ static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime) intel_opregion_suspend(display, s2idle ? 
PCI_D1 : PCI_D3cold); intel_dmc_suspend(display); + + if (runtime && has_display(xe)) + intel_hpd_poll_enable(xe); } void xe_display_pm_suspend(struct xe_device *xe) @@ -398,8 +401,10 @@ void xe_display_pm_runtime_suspend(struct xe_device *xe) if (!xe->info.probe_display) return; - if (xe->d3cold.allowed) + if (xe->d3cold.allowed) { __xe_display_pm_suspend(xe, true); + return; + } intel_hpd_poll_enable(xe); } @@ -481,9 +486,11 @@ static void __xe_display_pm_resume(struct xe_device *xe, bool runtime) intel_display_driver_resume(display); drm_kms_helper_poll_enable(&xe->drm); intel_display_driver_enable_user_access(display); - intel_hpd_poll_disable(xe); } + if (has_display(xe)) + intel_hpd_poll_disable(xe); + intel_opregion_resume(display); if (!runtime) @@ -502,10 +509,13 @@ void xe_display_pm_runtime_resume(struct xe_device *xe) if (!xe->info.probe_display) return; - intel_hpd_poll_disable(xe); - - if (xe->d3cold.allowed) + if (xe->d3cold.allowed) { __xe_display_pm_resume(xe, true); + return; + } + + intel_hpd_init(xe); + intel_hpd_poll_disable(xe); } diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c index 79dbbbe03c7f..761510ae0690 100644 --- a/drivers/gpu/drm/xe/display/xe_fb_pin.c +++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c @@ -79,7 +79,8 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, const struct i915_gtt_view *view, - struct i915_vma *vma) + struct i915_vma *vma, + u64 physical_alignment) { struct xe_device *xe = to_xe_device(fb->base.dev); struct xe_tile *tile0 = xe_device_get_root_tile(xe); @@ -99,23 +100,29 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, XE_PAGE_SIZE); if (IS_DGFX(xe)) - dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, - ttm_bo_type_kernel, - XE_BO_FLAG_VRAM0 | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_PAGETABLE); + dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_VRAM0 | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + physical_alignment); else - dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, - ttm_bo_type_kernel, - XE_BO_FLAG_STOLEN | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_PAGETABLE); + dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_STOLEN | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + physical_alignment); if (IS_ERR(dpt)) - dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, - ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_PAGETABLE); + dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + physical_alignment); if (IS_ERR(dpt)) return PTR_ERR(dpt); @@ -184,7 +191,8 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb, const struct i915_gtt_view *view, - struct i915_vma *vma) + struct i915_vma *vma, + u64 physical_alignment) { struct drm_gem_object *obj = intel_fb_bo(&fb->base); struct xe_bo *bo = gem_to_xe_bo(obj); @@ -266,7 +274,8 @@ out: } static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, - const struct i915_gtt_view *view) + const struct i915_gtt_view *view, + u64 physical_alignment) { struct drm_device *dev = fb->base.dev; struct xe_device *xe = to_xe_device(dev); @@ -315,9 +324,9 @@ static struct i915_vma 
*__xe_pin_fb_vma(const struct intel_framebuffer *fb, vma->bo = bo; if (intel_fb_uses_dpt(&fb->base)) - ret = __xe_pin_fb_vma_dpt(fb, view, vma); + ret = __xe_pin_fb_vma_dpt(fb, view, vma, physical_alignment); else - ret = __xe_pin_fb_vma_ggtt(fb, view, vma); + ret = __xe_pin_fb_vma_ggtt(fb, view, vma, physical_alignment); if (ret) goto err_unpin; @@ -358,7 +367,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb, { *out_flags = 0; - return __xe_pin_fb_vma(to_intel_framebuffer(fb), view); + return __xe_pin_fb_vma(to_intel_framebuffer(fb), view, phys_alignment); } void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags) @@ -372,11 +381,15 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state) struct drm_gem_object *obj = intel_fb_bo(fb); struct xe_bo *bo = gem_to_xe_bo(obj); struct i915_vma *vma; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + u64 phys_alignment = plane->min_alignment(plane, fb, 0); /* We reject creating !SCANOUT fb's, so this is weird.. */ drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT)); - vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt); + vma = __xe_pin_fb_vma(intel_fb, &plane_state->view.gtt, phys_alignment); + if (IS_ERR(vma)) return PTR_ERR(vma); diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c index 219e0b0dd07e..7c02323e9531 100644 --- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c +++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c @@ -42,6 +42,7 @@ bool intel_hdcp_gsc_check_status(struct intel_display *display) struct xe_gt *gt = tile->media_gt; struct xe_gsc *gsc = >->uc.gsc; bool ret = true; + unsigned int fw_ref; if (!gsc || !xe_uc_fw_is_enabled(&gsc->fw)) { drm_dbg_kms(&xe->drm, @@ -50,7 +51,8 @@ bool intel_hdcp_gsc_check_status(struct intel_display *display) } xe_pm_runtime_get(xe); - if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)) { + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); + if (!fw_ref) { drm_dbg_kms(&xe->drm, "failed to get forcewake to check proxy status\n"); ret = false; @@ -60,7 +62,7 @@ bool intel_hdcp_gsc_check_status(struct intel_display *display) if (!xe_gsc_proxy_init_done(gsc)) ret = false; - xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC); + xe_force_wake_put(gt_to_fw(gt), fw_ref); out: xe_pm_runtime_put(xe); return ret; diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index d428d04164d9..0c9e4b2fafab 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -410,12 +410,6 @@ #define XE2LPM_SCRATCH3_LBCF XE_REG_MCR(0xb654) -#define XE2LPM_L3SQCREG2 XE_REG_MCR(0xb604) - -#define XE2LPM_L3SQCREG3 XE_REG_MCR(0xb608) - -#define XE2LPM_SCRATCH3_LBCF XE_REG_MCR(0xb654) - #define XE2LPM_L3SQCREG5 XE_REG_MCR(0xb658) #define XE2_TDF_CTRL XE_REG(0xb418) @@ -534,7 +528,7 @@ * [4-6] RSVD * [7] Disabled */ -#define CCS_MODE XE_REG(0x14804) +#define CCS_MODE XE_REG(0x14804, XE_REG_OPTION_MASKED) #define CCS_MODE_CSLICE_0_3_MASK REG_GENMASK(11, 0) /* 3 bits per cslice */ #define CCS_MODE_CSLICE_MASK 0x7 /* CCS0-3 + rsvd */ #define CCS_MODE_CSLICE_WIDTH ilog2(CCS_MODE_CSLICE_MASK + 1) diff --git a/drivers/gpu/drm/xe/regs/xe_guc_regs.h b/drivers/gpu/drm/xe/regs/xe_guc_regs.h index b27b73680c12..2118f7dec287 100644 --- a/drivers/gpu/drm/xe/regs/xe_guc_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_guc_regs.h @@ -84,7 +84,8 @@ #define HUC_LOADING_AGENT_GUC REG_BIT(1) #define 
GUC_WOPCM_OFFSET_VALID REG_BIT(0) #define GUC_MAX_IDLE_COUNT XE_REG(0xc3e4) -#define GUC_PMTIMESTAMP XE_REG(0xc3e8) +#define GUC_PMTIMESTAMP_LO XE_REG(0xc3e8) +#define GUC_PMTIMESTAMP_HI XE_REG(0xc3ec) #define GUC_SEND_INTERRUPT XE_REG(0xc4c8) #define GUC_SEND_TRIGGER REG_BIT(0) diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c index cd811aa2b227..3e0ae40ebbd2 100644 --- a/drivers/gpu/drm/xe/tests/xe_bo.c +++ b/drivers/gpu/drm/xe/tests/xe_bo.c @@ -8,7 +8,7 @@ #include <linux/iosys-map.h> #include <linux/math64.h> -#include <linux/random.h> +#include <linux/prandom.h> #include <linux/swap.h> #include <uapi/linux/sysinfo.h> diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c index 5f14737c8210..0d36ab864ec0 100644 --- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c +++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c @@ -18,4 +18,4 @@ kunit_test_suite(xe_mocs_test_suite); MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("xe live kunit tests"); -MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c index ea932c051cc7..6f9b7a266b41 100644 --- a/drivers/gpu/drm/xe/tests/xe_mocs.c +++ b/drivers/gpu/drm/xe/tests/xe_mocs.c @@ -43,12 +43,11 @@ static void read_l3cc_table(struct xe_gt *gt, { struct kunit *test = kunit_get_current_test(); u32 l3cc, l3cc_expected; - unsigned int i; + unsigned int fw_ref, i; u32 reg_val; - u32 ret; - ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n"); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n"); for (i = 0; i < info->num_mocs_regs; i++) { if (!(i & 1)) { @@ -72,7 +71,7 @@ static void read_l3cc_table(struct xe_gt *gt, KUNIT_EXPECT_EQ_MSG(test, l3cc_expected, l3cc, "l3cc idx=%u has incorrect val.\n", i); } - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static void read_mocs_table(struct xe_gt *gt, @@ -80,15 +79,14 @@ static void read_mocs_table(struct xe_gt *gt, { struct kunit *test = kunit_get_current_test(); u32 mocs, mocs_expected; - unsigned int i; + unsigned int fw_ref, i; u32 reg_val; - u32 ret; KUNIT_EXPECT_TRUE_MSG(test, info->unused_entries_index, "Unused entries index should have been defined\n"); - ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n"); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n"); for (i = 0; i < info->num_mocs_regs; i++) { if (regs_are_mcr(gt)) @@ -106,7 +104,7 @@ static void read_mocs_table(struct xe_gt *gt, "mocs reg 0x%x has incorrect val.\n", i); } - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static int mocs_kernel_test_run_device(struct xe_device *xe) diff --git a/drivers/gpu/drm/xe/tests/xe_test_mod.c b/drivers/gpu/drm/xe/tests/xe_test_mod.c index 875f3e6f965e..93081bcf2ab0 100644 --- a/drivers/gpu/drm/xe/tests/xe_test_mod.c +++ b/drivers/gpu/drm/xe/tests/xe_test_mod.c @@ -7,4 +7,4 @@ MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("xe kunit tests"); -MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index c74c121ea7bb..ae6b337cdc54 
100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -888,8 +888,8 @@ int xe_bo_evict_pinned(struct xe_bo *bo) if (WARN_ON(!xe_bo_is_pinned(bo))) return -EINVAL; - if (WARN_ON(!xe_bo_is_vram(bo))) - return -EINVAL; + if (!xe_bo_is_vram(bo)) + return 0; ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx); if (ret) @@ -939,6 +939,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo) .interruptible = false, }; struct ttm_resource *new_mem; + struct ttm_place *place = &bo->placements[0]; int ret; xe_bo_assert_held(bo); @@ -949,9 +950,15 @@ int xe_bo_restore_pinned(struct xe_bo *bo) if (WARN_ON(!xe_bo_is_pinned(bo))) return -EINVAL; - if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm)) + if (WARN_ON(xe_bo_is_vram(bo))) + return -EINVAL; + + if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo))) return -EINVAL; + if (!mem_type_is_vram(place->mem_type)) + return 0; + ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx); if (ret) return ret; @@ -1454,7 +1461,8 @@ static struct xe_bo * __xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 start, u64 end, - u16 cpu_caching, enum ttm_bo_type type, u32 flags) + u16 cpu_caching, enum ttm_bo_type type, u32 flags, + u64 alignment) { struct xe_bo *bo = NULL; int err; @@ -1483,6 +1491,8 @@ __xe_bo_create_locked(struct xe_device *xe, if (IS_ERR(bo)) return bo; + bo->min_align = alignment; + /* * Note that instead of taking a reference no the drm_gpuvm_resv_bo(), * to ensure the shared resv doesn't disappear under the bo, the bo @@ -1523,16 +1533,18 @@ struct xe_bo * xe_bo_create_locked_range(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 start, u64 end, - enum ttm_bo_type type, u32 flags) + enum ttm_bo_type type, u32 flags, u64 alignment) { - return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags); + return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, + flags, alignment); } struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, enum ttm_bo_type type, u32 flags) { - return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags); + return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, + flags, 0); } struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, @@ -1542,7 +1554,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, { struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, cpu_caching, ttm_bo_type_device, - flags | XE_BO_FLAG_USER); + flags | XE_BO_FLAG_USER, 0); if (!IS_ERR(bo)) xe_bo_unlock_vm_held(bo); @@ -1566,6 +1578,17 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile size_t size, u64 offset, enum ttm_bo_type type, u32 flags) { + return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset, + type, flags, 0); +} + +struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, + struct xe_tile *tile, + struct xe_vm *vm, + size_t size, u64 offset, + enum ttm_bo_type type, u32 flags, + u64 alignment) +{ struct xe_bo *bo; int err; u64 start = offset == ~0ull ? 
0 : offset; @@ -1576,7 +1599,8 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile flags |= XE_BO_FLAG_GGTT; bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type, - flags | XE_BO_FLAG_NEEDS_CPU_ACCESS); + flags | XE_BO_FLAG_NEEDS_CPU_ACCESS, + alignment); if (IS_ERR(bo)) return bo; @@ -1749,6 +1773,7 @@ int xe_bo_pin_external(struct xe_bo *bo) int xe_bo_pin(struct xe_bo *bo) { + struct ttm_place *place = &bo->placements[0]; struct xe_device *xe = xe_bo_device(bo); int err; @@ -1779,21 +1804,21 @@ int xe_bo_pin(struct xe_bo *bo) */ if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) && bo->flags & XE_BO_FLAG_INTERNAL_TEST)) { - struct ttm_place *place = &(bo->placements[0]); - if (mem_type_is_vram(place->mem_type)) { xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS); place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) - vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT; place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT); - - spin_lock(&xe->pinned.lock); - list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present); - spin_unlock(&xe->pinned.lock); } } + if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) { + spin_lock(&xe->pinned.lock); + list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present); + spin_unlock(&xe->pinned.lock); + } + ttm_bo_pin(&bo->ttm); /* @@ -1839,23 +1864,18 @@ void xe_bo_unpin_external(struct xe_bo *bo) void xe_bo_unpin(struct xe_bo *bo) { + struct ttm_place *place = &bo->placements[0]; struct xe_device *xe = xe_bo_device(bo); xe_assert(xe, !bo->ttm.base.import_attach); xe_assert(xe, xe_bo_is_pinned(bo)); - if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) && - bo->flags & XE_BO_FLAG_INTERNAL_TEST)) { - struct ttm_place *place = &(bo->placements[0]); - - if (mem_type_is_vram(place->mem_type)) { - spin_lock(&xe->pinned.lock); - xe_assert(xe, !list_empty(&bo->pinned_link)); - list_del_init(&bo->pinned_link); - spin_unlock(&xe->pinned.lock); - } + if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) { + spin_lock(&xe->pinned.lock); + xe_assert(xe, !list_empty(&bo->pinned_link)); + list_del_init(&bo->pinned_link); + spin_unlock(&xe->pinned.lock); } - ttm_bo_unpin(&bo->ttm); } diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index 31f4ba3bd8c1..7fa44a0138b0 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -77,7 +77,7 @@ struct xe_bo * xe_bo_create_locked_range(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 start, u64 end, - enum ttm_bo_type type, u32 flags); + enum ttm_bo_type type, u32 flags, u64 alignment); struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, enum ttm_bo_type type, u32 flags); @@ -94,6 +94,12 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 offset, enum ttm_bo_type type, u32 flags); +struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, + struct xe_tile *tile, + struct xe_vm *vm, + size_t size, u64 offset, + enum ttm_bo_type type, u32 flags, + u64 alignment); struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, const void *data, size_t size, enum ttm_bo_type type, u32 flags); diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c index 541b49007d73..8fb2be061003 100644 --- a/drivers/gpu/drm/xe/xe_bo_evict.c +++ 
b/drivers/gpu/drm/xe/xe_bo_evict.c @@ -34,14 +34,22 @@ int xe_bo_evict_all(struct xe_device *xe) u8 id; int ret; - if (!IS_DGFX(xe)) - return 0; - /* User memory */ - for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) { + for (mem_type = XE_PL_TT; mem_type <= XE_PL_VRAM1; ++mem_type) { struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type); + /* + * On igpu platforms with flat CCS we need to ensure we save and restore any CCS + * state since this state lives inside graphics stolen memory which doesn't survive + * hibernation. + * + * This can be further improved by only evicting objects that we know have actually + * used a compression enabled PAT index. + */ + if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe))) + continue; + if (man) { ret = ttm_resource_manager_evict_all(bdev, man); if (ret) @@ -125,9 +133,6 @@ int xe_bo_restore_kernel(struct xe_device *xe) struct xe_bo *bo; int ret; - if (!IS_DGFX(xe)) - return 0; - spin_lock(&xe->pinned.lock); for (;;) { bo = list_first_entry_or_null(&xe->pinned.evicted, @@ -159,7 +164,6 @@ int xe_bo_restore_kernel(struct xe_device *xe) * should setup the iosys map. */ xe_assert(xe, !iosys_map_is_null(&bo->vmap)); - xe_assert(xe, xe_bo_is_vram(bo)); xe_bo_put(bo); diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h index 8b9201775081..13c6d8a69e91 100644 --- a/drivers/gpu/drm/xe/xe_bo_types.h +++ b/drivers/gpu/drm/xe/xe_bo_types.h @@ -76,6 +76,11 @@ struct xe_bo { /** @vram_userfault_link: Link into @mem_access.vram_userfault.list */ struct list_head vram_userfault_link; + + /** @min_align: minimum alignment needed for this BO if different + * from default + */ + u64 min_align; }; #endif diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c index fe4319eb13fd..492b4877433f 100644 --- a/drivers/gpu/drm/xe/xe_debugfs.c +++ b/drivers/gpu/drm/xe/xe_debugfs.c @@ -90,13 +90,32 @@ static int forcewake_open(struct inode *inode, struct file *file) { struct xe_device *xe = inode->i_private; struct xe_gt *gt; - u8 id; + u8 id, last_gt; + unsigned int fw_ref; xe_pm_runtime_get(xe); - for_each_gt(gt, xe, id) - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + for_each_gt(gt, xe, id) { + last_gt = id; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) + goto err_fw_get; + } return 0; + +err_fw_get: + for_each_gt(gt, xe, id) { + if (id < last_gt) + xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); + else if (id == last_gt) + xe_force_wake_put(gt_to_fw(gt), fw_ref); + else + break; + } + + xe_pm_runtime_put(xe); + return -ETIMEDOUT; } static int forcewake_release(struct inode *inode, struct file *file) @@ -106,7 +125,7 @@ static int forcewake_release(struct inode *inode, struct file *file) u8 id; for_each_gt(gt, xe, id) - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); xe_pm_runtime_put(xe); return 0; diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index 99842a35dbf0..f8947e7e917e 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -23,6 +23,7 @@ #include "xe_guc_submit.h" #include "xe_hw_engine.h" #include "xe_module.h" +#include "xe_pm.h" #include "xe_sched_job.h" #include "xe_vm.h" @@ -154,29 +155,6 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss) ss->vm = NULL; } -static void 
xe_devcoredump_deferred_snap_work(struct work_struct *work) -{ - struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work); - struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot); - - /* keep going if fw fails as we still want to save the memory and SW data */ - if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL)) - xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n"); - xe_vm_snapshot_capture_delayed(ss->vm); - xe_guc_exec_queue_snapshot_capture_delayed(ss->ge); - xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL); - - /* Calculate devcoredump size */ - ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump); - - ss->read.buffer = kvmalloc(ss->read.size, GFP_USER); - if (!ss->read.buffer) - return; - - __xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump); - xe_devcoredump_snapshot_free(ss); -} - static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, size_t count, void *data, size_t datalen) { @@ -226,6 +204,45 @@ static void xe_devcoredump_free(void *data) "Xe device coredump has been deleted.\n"); } +static void xe_devcoredump_deferred_snap_work(struct work_struct *work) +{ + struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work); + struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot); + struct xe_device *xe = coredump_to_xe(coredump); + unsigned int fw_ref; + + /* + * NB: Despite passing a GFP_ flags parameter here, more allocations are done + * internally using GFP_KERNEL explicitly. Hence this call must be in the worker + * thread and not in the initial capture call. + */ + dev_coredumpm_timeout(gt_to_xe(ss->gt)->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL, + xe_devcoredump_read, xe_devcoredump_free, + XE_COREDUMP_TIMEOUT_JIFFIES); + + xe_pm_runtime_get(xe); + + /* keep going if fw fails as we still want to save the memory and SW data */ + fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) + xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n"); + xe_vm_snapshot_capture_delayed(ss->vm); + xe_guc_exec_queue_snapshot_capture_delayed(ss->ge); + xe_force_wake_put(gt_to_fw(ss->gt), fw_ref); + + xe_pm_runtime_put(xe); + + /* Calculate devcoredump size */ + ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump); + + ss->read.buffer = kvmalloc(ss->read.size, GFP_USER); + if (!ss->read.buffer) + return; + + __xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump); + xe_devcoredump_snapshot_free(ss); +} + static void devcoredump_snapshot(struct xe_devcoredump *coredump, struct xe_sched_job *job) { @@ -236,8 +253,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, u32 width_mask = (0x1 << q->width) - 1; const char *process_name = "no process"; - int i; + unsigned int fw_ref; bool cookie; + int i; ss->snapshot_time = ktime_get_real(); ss->boot_time = ktime_get_boottime(); @@ -261,11 +279,10 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, } /* keep going if fw fails as we still want to save the memory and SW data */ - if (xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL)) - xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n"); + fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); ss->guc.log = xe_guc_log_snapshot_capture(&guc->log, true); - ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct, true); + ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct); ss->ge = 
xe_guc_exec_queue_snapshot_capture(q); ss->job = xe_sched_job_snapshot_capture(job); ss->vm = xe_vm_snapshot_capture(q->vm); @@ -274,7 +291,7 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, queue_work(system_unbound_wq, &ss->work); - xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(q->gt), fw_ref); dma_fence_end_signalling(cookie); } @@ -302,10 +319,6 @@ void xe_devcoredump(struct xe_sched_job *job) drm_info(&xe->drm, "Xe device coredump has been created\n"); drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n", xe->drm.primary->index); - - dev_coredumpm_timeout(xe->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL, - xe_devcoredump_read, xe_devcoredump_free, - XE_COREDUMP_TIMEOUT_JIFFIES); } static void xe_driver_devcoredump_fini(void *arg) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 962751c966d1..06d6db8b50f9 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -5,11 +5,11 @@ #include "xe_device.h" +#include <linux/aperture.h> #include <linux/delay.h> #include <linux/fault-inject.h> #include <linux/units.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client.h> #include <drm/drm_gem_ttm_helper.h> @@ -88,10 +88,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) mutex_init(&xef->exec_queue.lock); xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1); - spin_lock(&xe->clients.lock); - xe->clients.count++; - spin_unlock(&xe->clients.lock); - file->driver_priv = xef; kref_init(&xef->refcount); @@ -108,17 +104,12 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) static void xe_file_destroy(struct kref *ref) { struct xe_file *xef = container_of(ref, struct xe_file, refcount); - struct xe_device *xe = xef->xe; xa_destroy(&xef->exec_queue.xa); mutex_destroy(&xef->exec_queue.lock); xa_destroy(&xef->vm.xa); mutex_destroy(&xef->vm.lock); - spin_lock(&xe->clients.lock); - xe->clients.count--; - spin_unlock(&xe->clients.lock); - xe_drm_client_put(xef->client); kfree(xef->process_name); kfree(xef); @@ -311,7 +302,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, xe_display_driver_set_hooks(&driver); - err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); + err = aperture_remove_conflicting_pci_devices(pdev, driver.name); if (err) return ERR_PTR(err); @@ -334,7 +325,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, xe->info.force_execlist = xe_modparam.force_execlist; spin_lock_init(&xe->irq.lock); - spin_lock_init(&xe->clients.lock); init_waitqueue_head(&xe->ufence_wq); @@ -360,7 +350,8 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, INIT_LIST_HEAD(&xe->pinned.external_vram); INIT_LIST_HEAD(&xe->pinned.evicted); - xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0); + xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", + WQ_MEM_RECLAIM); xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0); xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0); xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0); @@ -604,8 +595,8 @@ int xe_device_probe_early(struct xe_device *xe) static int probe_has_flat_ccs(struct xe_device *xe) { struct xe_gt *gt; + unsigned int fw_ref; u32 reg; - int err; /* Always enabled/disabled, no runtime check to do */ if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs) @@ -613,9 +604,9 @@ static int probe_has_flat_ccs(struct xe_device *xe) gt = 
xe_root_mmio_gt(xe); - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - return err; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER); xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE); @@ -624,7 +615,8 @@ static int probe_has_flat_ccs(struct xe_device *xe) drm_dbg(&xe->drm, "Flat CCS has been disabled in bios, May lead to performance impact"); - return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return 0; } int xe_device_probe(struct xe_device *xe) @@ -875,6 +867,7 @@ void xe_device_wmb(struct xe_device *xe) void xe_device_td_flush(struct xe_device *xe) { struct xe_gt *gt; + unsigned int fw_ref; u8 id; if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20) @@ -889,7 +882,8 @@ void xe_device_td_flush(struct xe_device *xe) if (xe_gt_is_media_type(gt)) continue; - if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST); @@ -904,32 +898,32 @@ void xe_device_td_flush(struct xe_device *xe) 150, NULL, false)) xe_gt_err_once(gt, "TD flush timeout\n"); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } } void xe_device_l2_flush(struct xe_device *xe) { struct xe_gt *gt; - int err; + unsigned int fw_ref; gt = xe_root_mmio_gt(xe); if (!XE_WA(gt, 16023588340)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; spin_lock(&gt->global_invl_lock); xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1); - if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true)) + if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true)) xe_gt_err_once(gt, "Global invalidation timeout\n"); spin_unlock(&gt->global_invl_lock); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size) diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h index 4c3f0ebe78a9..f1fbfe916867 100644 --- a/drivers/gpu/drm/xe/xe_device.h +++ b/drivers/gpu/drm/xe/xe_device.h @@ -191,4 +191,18 @@ void xe_device_declare_wedged(struct xe_device *xe); struct xe_file *xe_file_get(struct xe_file *xef); void xe_file_put(struct xe_file *xef); +/* + * Occasionally it is seen that the G2H worker starts running after a delay of more than + * a second even after being queued and activated by the Linux workqueue subsystem. This + * leads to a G2H timeout error. The root cause lies in the scheduling latency of the + * Lunarlake hybrid CPU; the issue disappears if the Lunarlake Atom cores are disabled + * in the BIOS, which is beyond the control of the xe KMD. + * + * TODO: Drop this change once the workqueue scheduling delay issue is fixed on LNL Hybrid CPU. 
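+ * + * For illustration only (not a definition from this header; wq, done and worker are placeholder names), callers are expected to use these macros as a fallback after a timed-out wait, mirroring the pattern added to xe_guc_ct.c later in this diff: + * + *	ret = wait_event_timeout(wq, done, HZ); + *	if (!ret) { + *		LNL_FLUSH_WORK(&worker); + *		if (done) + *			ret = 1; + *	}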
+ */ +#define LNL_FLUSH_WORKQUEUE(wq__) \ + flush_workqueue(wq__) +#define LNL_FLUSH_WORK(wrk__) \ + flush_work(wrk__) + #endif diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 85bede4dd646..b9ea455d6f59 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -379,15 +379,6 @@ struct xe_device { struct workqueue_struct *wq; } sriov; - /** @clients: drm clients info */ - struct { - /** @clients.lock: Protects drm clients info */ - spinlock_t lock; - - /** @clients.count: number of drm clients */ - u64 count; - } clients; - /** @usm: unified memory state */ struct { /** @usm.asid: convert a ASID to VM */ diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c index 68f309f5e981..c5b95470fa32 100644 --- a/drivers/gpu/drm/xe/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/xe_dma_buf.c @@ -20,7 +20,7 @@ #include "xe_ttm_vram_mgr.h" #include "xe_vm.h" -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); static int xe_dma_buf_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c index fb52a23e28f8..22f0f1a6dfd5 100644 --- a/drivers/gpu/drm/xe/xe_drm_client.c +++ b/drivers/gpu/drm/xe/xe_drm_client.c @@ -278,6 +278,7 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file) struct xe_hw_engine *hwe; struct xe_exec_queue *q; u64 gpu_timestamp; + unsigned int fw_ref; xe_pm_runtime_get(xe); @@ -303,13 +304,16 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file) continue; fw = xe_hw_engine_to_fw_domain(hwe); - if (xe_force_wake_get(gt_to_fw(gt), fw)) { + + fw_ref = xe_force_wake_get(gt_to_fw(gt), fw); + if (!xe_force_wake_ref_has_domain(fw_ref, fw)) { hwe = NULL; + xe_force_wake_put(gt_to_fw(gt), fw_ref); break; } gpu_timestamp = xe_hw_engine_read_timestamp(hwe); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); break; } diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c index 7b38485817dc..31cca938956f 100644 --- a/drivers/gpu/drm/xe/xe_exec.c +++ b/drivers/gpu/drm/xe/xe_exec.c @@ -41,11 +41,6 @@ * user knows an exec writes to a BO and reads from the BO in the next exec, it * is the user's responsibility to pass in / out fence between the two execs). * - * Implicit dependencies for external BOs are handled by using the dma-buf - * implicit dependency uAPI (TODO: add link). To make this works each exec must - * install the job's fence into the DMA_RESV_USAGE_WRITE slot of every external - * BO mapped in the VM. - * * We do not allow a user to trigger a bind at exec time rather we have a VM * bind IOCTL which uses the same in / out fence interface as exec. In that * sense, a VM bind is basically the same operation as an exec from the user @@ -59,8 +54,8 @@ * behind any pending kernel operations on any external BOs in VM or any BOs * private to the VM. This is accomplished by the rebinds waiting on BOs * DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on all BOs - * slots (inflight execs are in the DMA_RESV_USAGE_BOOKING for private BOs and - * in DMA_RESV_USAGE_WRITE for external BOs). + * slots (inflight execs are in the DMA_RESV_USAGE_BOOKKEEP for private BOs and + * for external BOs). * * Rebinds / dma-resv usage applies to non-compute mode VMs only as for compute * mode VMs we use preempt fences and a rebind worker (TODO: add link). 
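As an illustration of the dma-resv bookkeeping described in the comment above, the exec path now installs the job fence with the same usage for private and external BOs; this is the call as it appears in the hunk below: drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished, DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);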
@@ -137,12 +132,16 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) if (XE_IOCTL_DBG(xe, !q)) return -ENOENT; - if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) - return -EINVAL; + if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) { + err = -EINVAL; + goto err_exec_queue; + } if (XE_IOCTL_DBG(xe, args->num_batch_buffer && - q->width != args->num_batch_buffer)) - return -EINVAL; + q->width != args->num_batch_buffer)) { + err = -EINVAL; + goto err_exec_queue; + } if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) { err = -ECANCELED; @@ -204,14 +203,14 @@ retry: write_locked = false; } if (err) - goto err_syncs; + goto err_hw_exec_mode; if (write_locked) { err = xe_vm_userptr_pin(vm); downgrade_write(&vm->lock); write_locked = false; if (err) - goto err_hw_exec_mode; + goto err_unlock_list; } if (!args->num_batch_buffer) { @@ -225,6 +224,7 @@ retry: fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm); if (IS_ERR(fence)) { err = PTR_ERR(fence); + xe_vm_unlock(vm); goto err_unlock_list; } for (i = 0; i < num_syncs; i++) @@ -304,7 +304,8 @@ retry: xe_sched_job_arm(job); if (!xe_vm_in_lr_mode(vm)) drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished, - DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE); + DMA_RESV_USAGE_BOOKKEEP, + DMA_RESV_USAGE_BOOKKEEP); for (i = 0; i < num_syncs; i++) { xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished); diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index d098d2dd1b2d..fd0f3b3c9101 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -260,8 +260,14 @@ void xe_exec_queue_fini(struct xe_exec_queue *q) { int i; + /* + * Before releasing our ref to lrc and xef, accumulate our run ticks + */ + xe_exec_queue_update_run_ticks(q); + for (i = 0; i < q->width; ++i) xe_lrc_put(q->lrc[i]); + __xe_exec_queue_free(q); } diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h index 7deb480e26af..1158b6062a6c 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h @@ -143,7 +143,7 @@ struct xe_exec_queue { /** @hw_engine_group_link: link into exec queues in the same hw engine group */ struct list_head hw_engine_group_link; /** @lrc: logical ring context for this exec queue */ - struct xe_lrc *lrc[]; + struct xe_lrc *lrc[] __counted_by(width); }; /** diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c index f3b71fe7a96d..a8c416a48812 100644 --- a/drivers/gpu/drm/xe/xe_execlist.c +++ b/drivers/gpu/drm/xe/xe_execlist.c @@ -313,7 +313,7 @@ execlist_run_job(struct drm_sched_job *drm_job) q->ring_ops->emit_job(job); xe_execlist_make_active(exl); - return dma_fence_get(job->fence); + return job->fence; } static void execlist_job_free(struct drm_sched_job *drm_job) diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c index a64c14757c84..4f6784e5abf8 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.c +++ b/drivers/gpu/drm/xe/xe_force_wake.c @@ -21,15 +21,25 @@ static const char *str_wake_sleep(bool wake) return wake ? 
"wake" : "sleep"; } -static void domain_init(struct xe_force_wake_domain *domain, +static void mark_domain_initialized(struct xe_force_wake *fw, + enum xe_force_wake_domain_id id) +{ + fw->initialized_domains |= BIT(id); +} + +static void init_domain(struct xe_force_wake *fw, enum xe_force_wake_domain_id id, struct xe_reg reg, struct xe_reg ack) { + struct xe_force_wake_domain *domain = &fw->domains[id]; + domain->id = id; domain->reg_ctl = reg; domain->reg_ack = ack; domain->val = FORCEWAKE_MT(FORCEWAKE_KERNEL); domain->mask = FORCEWAKE_MT_MASK(FORCEWAKE_KERNEL); + + mark_domain_initialized(fw, id); } void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw) @@ -43,13 +53,11 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw) xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11); if (xe->info.graphics_verx100 >= 1270) { - domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT], - XE_FW_DOMAIN_ID_GT, + init_domain(fw, XE_FW_DOMAIN_ID_GT, FORCEWAKE_GT, FORCEWAKE_ACK_GT_MTL); } else { - domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT], - XE_FW_DOMAIN_ID_GT, + init_domain(fw, XE_FW_DOMAIN_ID_GT, FORCEWAKE_GT, FORCEWAKE_ACK_GT); } @@ -63,8 +71,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw) xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11); if (!xe_gt_is_media_type(gt)) - domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER], - XE_FW_DOMAIN_ID_RENDER, + init_domain(fw, XE_FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER, FORCEWAKE_ACK_RENDER); @@ -72,8 +79,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw) if (!(gt->info.engine_mask & BIT(i))) continue; - domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j], - XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j, + init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j, FORCEWAKE_MEDIA_VDBOX(j), FORCEWAKE_ACK_MEDIA_VDBOX(j)); } @@ -82,15 +88,13 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw) if (!(gt->info.engine_mask & BIT(i))) continue; - domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j], - XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j, + init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j, FORCEWAKE_MEDIA_VEBOX(j), FORCEWAKE_ACK_MEDIA_VEBOX(j)); } if (gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)) - domain_init(&fw->domains[XE_FW_DOMAIN_ID_GSC], - XE_FW_DOMAIN_ID_GSC, + init_domain(fw, XE_FW_DOMAIN_ID_GSC, FORCEWAKE_GSC, FORCEWAKE_ACK_GSC); } @@ -115,9 +119,15 @@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain, XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC, &value, true); if (ret) - xe_gt_notice(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n", - domain->id, str_wake_sleep(wake), ERR_PTR(ret), - domain->reg_ack.addr, value); + xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n", + domain->id, str_wake_sleep(wake), ERR_PTR(ret), + domain->reg_ack.addr, value); + if (value == ~0) { + xe_gt_err(gt, + "Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n", + domain->id, str_wake_sleep(wake)); + ret = -EIO; + } return ret; } @@ -150,52 +160,108 @@ static int domain_sleep_wait(struct xe_gt *gt, (ffs(tmp__) - 1))) && \ domain__->reg_ctl.addr) -int xe_force_wake_get(struct xe_force_wake *fw, - enum xe_force_wake_domains domains) +/** + * xe_force_wake_get() : Increase the domain refcount + * @fw: struct xe_force_wake + * @domains: forcewake domains to get refcount on + * + * This function wakes up @domains if they are asleep and takes references. 
+ * If the requested domains include XE_FORCEWAKE_ALL, only the applicable/initialized + * domains are refcounted, and it is the caller's responsibility to check whether + * the returned reference includes a specific domain by using + * xe_force_wake_ref_has_domain(). The caller must call xe_force_wake_put() + * to release the references taken here. + * + * Return: opaque reference to the woken domains, or zero if none of the + * requested domains were woken. + */ +unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw, + enum xe_force_wake_domains domains) { struct xe_gt *gt = fw->gt; struct xe_force_wake_domain *domain; - enum xe_force_wake_domains tmp, woken = 0; + unsigned int ref_incr = 0, awake_rqst = 0, awake_failed = 0; + unsigned int tmp, ref_rqst; unsigned long flags; - int ret = 0; + xe_gt_assert(gt, is_power_of_2(domains)); + xe_gt_assert(gt, domains <= XE_FORCEWAKE_ALL); + xe_gt_assert(gt, domains == XE_FORCEWAKE_ALL || fw->initialized_domains & domains); + + ref_rqst = (domains == XE_FORCEWAKE_ALL) ? fw->initialized_domains : domains; spin_lock_irqsave(&fw->lock, flags); - for_each_fw_domain_masked(domain, domains, fw, tmp) { + for_each_fw_domain_masked(domain, ref_rqst, fw, tmp) { if (!domain->ref++) { - woken |= BIT(domain->id); + awake_rqst |= BIT(domain->id); domain_wake(gt, domain); } + ref_incr |= BIT(domain->id); } - for_each_fw_domain_masked(domain, woken, fw, tmp) { - ret |= domain_wake_wait(gt, domain); + for_each_fw_domain_masked(domain, awake_rqst, fw, tmp) { + if (domain_wake_wait(gt, domain) == 0) { + fw->awake_domains |= BIT(domain->id); + } else { + awake_failed |= BIT(domain->id); + --domain->ref; + } } - fw->awake_domains |= woken; + ref_incr &= ~awake_failed; spin_unlock_irqrestore(&fw->lock, flags); - return ret; + xe_gt_WARN(gt, awake_failed, "Forcewake domain%s %#x failed to acknowledge awake request\n", + str_plural(hweight_long(awake_failed)), awake_failed); + + if (domains == XE_FORCEWAKE_ALL && ref_incr == fw->initialized_domains) + ref_incr |= XE_FORCEWAKE_ALL; + + return ref_incr; } -int xe_force_wake_put(struct xe_force_wake *fw, - enum xe_force_wake_domains domains) +/** + * xe_force_wake_put - Decrement the refcount and put domain to sleep if refcount becomes 0 + * @fw: Pointer to the force wake structure + * @fw_ref: return of xe_force_wake_get() + * + * This function decrements the reference counts for the domains in @fw_ref. If + * the refcount for any of the specified domains reaches 0, that domain is put to + * sleep and we wait up to the 50 millisecond timeout for the sleep to be + * acknowledged. Warns if a domain fails to acknowledge the sleep request in time. + */ +void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref) { struct xe_gt *gt = fw->gt; struct xe_force_wake_domain *domain; - enum xe_force_wake_domains tmp, sleep = 0; + unsigned int tmp, sleep = 0; unsigned long flags; - int ret = 0; + int ack_fail = 0; + + /* + * Avoid unnecessary lock and unlock when the function is called + * in the error path of individual domains. 
+ */ + if (!fw_ref) + return; + + if (xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) + fw_ref = fw->initialized_domains; spin_lock_irqsave(&fw->lock, flags); - for_each_fw_domain_masked(domain, domains, fw, tmp) { + for_each_fw_domain_masked(domain, fw_ref, fw, tmp) { + xe_gt_assert(gt, domain->ref); + if (!--domain->ref) { sleep |= BIT(domain->id); domain_sleep(gt, domain); } } for_each_fw_domain_masked(domain, sleep, fw, tmp) { - ret |= domain_sleep_wait(gt, domain); + if (domain_sleep_wait(gt, domain) == 0) + fw->awake_domains &= ~BIT(domain->id); + else + ack_fail |= BIT(domain->id); } - fw->awake_domains &= ~sleep; spin_unlock_irqrestore(&fw->lock, flags); - return ret; + xe_gt_WARN(gt, ack_fail, "Forcewake domain%s %#x failed to acknowledge sleep request\n", + str_plural(hweight_long(ack_fail)), ack_fail); } diff --git a/drivers/gpu/drm/xe/xe_force_wake.h b/drivers/gpu/drm/xe/xe_force_wake.h index a2577672f4e3..0e3e84bfa51c 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.h +++ b/drivers/gpu/drm/xe/xe_force_wake.h @@ -15,10 +15,9 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw); void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw); -int xe_force_wake_get(struct xe_force_wake *fw, - enum xe_force_wake_domains domains); -int xe_force_wake_put(struct xe_force_wake *fw, - enum xe_force_wake_domains domains); +unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw, + enum xe_force_wake_domains domains); +void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref); static inline int xe_force_wake_ref(struct xe_force_wake *fw, @@ -46,4 +45,20 @@ xe_force_wake_assert_held(struct xe_force_wake *fw, xe_gt_assert(fw->gt, fw->awake_domains & domain); } +/** + * xe_force_wake_ref_has_domain - verifies if the domain is in fw_ref + * @fw_ref: the force_wake reference + * @domain: forcewake domain to verify + * + * This function confirms whether the @fw_ref includes a reference to the + * specified @domain. + * + * Return: true if the domain is refcounted. 
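+ * + * Illustrative sketch of the intended calling pattern (not itself part of + * this patch; it mirrors the callers converted elsewhere in this diff): + * + *	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + *	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + *		xe_force_wake_put(gt_to_fw(gt), fw_ref); + *		return -ETIMEDOUT; + *	} + *	...access the hardware... + *	xe_force_wake_put(gt_to_fw(gt), fw_ref);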
+ */ +static inline bool +xe_force_wake_ref_has_domain(unsigned int fw_ref, enum xe_force_wake_domains domain) +{ + return fw_ref & domain; +} + #endif diff --git a/drivers/gpu/drm/xe/xe_force_wake_types.h b/drivers/gpu/drm/xe/xe_force_wake_types.h index ed0edc2cdf9f..899fbbcb3ea9 100644 --- a/drivers/gpu/drm/xe/xe_force_wake_types.h +++ b/drivers/gpu/drm/xe/xe_force_wake_types.h @@ -48,7 +48,7 @@ enum xe_force_wake_domains { XE_FW_MEDIA_VEBOX2 = BIT(XE_FW_DOMAIN_ID_MEDIA_VEBOX2), XE_FW_MEDIA_VEBOX3 = BIT(XE_FW_DOMAIN_ID_MEDIA_VEBOX3), XE_FW_GSC = BIT(XE_FW_DOMAIN_ID_GSC), - XE_FORCEWAKE_ALL = BIT(XE_FW_DOMAIN_ID_COUNT) - 1 + XE_FORCEWAKE_ALL = BIT(XE_FW_DOMAIN_ID_COUNT) }; /** @@ -78,7 +78,9 @@ struct xe_force_wake { /** @lock: protects everything force wake struct */ spinlock_t lock; /** @awake_domains: mask of all domains awake */ - enum xe_force_wake_domains awake_domains; + unsigned int awake_domains; + /** @initialized_domains: mask of all initialized domains */ + unsigned int initialized_domains; /** @domains: force wake domains */ struct xe_force_wake_domain domains[XE_FW_DOMAIN_ID_COUNT]; }; diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index 47bfd9d2635d..558fac8bb6fb 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -246,7 +246,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt) else ggtt->pt_ops = &xelp_pt_ops; - ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, 0); + ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM); drm_mm_init(&ggtt->mm, xe_wopcm_size(xe), ggtt->size - xe_wopcm_size(xe)); @@ -401,6 +401,16 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt) static void xe_ggtt_invalidate(struct xe_ggtt *ggtt) { + struct xe_device *xe = tile_to_xe(ggtt->tile); + + /* + * XXX: Barrier for GGTT pages. Unsure exactly why this is required but + * without this LNL is having issues with the GuC reading scratch page + * vs. correct GGTT page. Not particularly a hot code path so blindly + * do an mmio read here, which results in the GuC reading the correct GGTT page. + */ + xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG); + /* Each GT in a tile has its own TLB to cache GGTT lookups */ ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt); ggtt_invalidate_gt_tlb(ggtt->tile->media_gt); @@ -603,7 +613,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, u64 start, u64 end) { int err; - u64 alignment = XE_PAGE_SIZE; + u64 alignment = bo->min_align > 0 ? 
bo->min_align : XE_PAGE_SIZE; if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K) alignment = SZ_64K; diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h index 5ad5629a6c60..64b2ae6839db 100644 --- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h +++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h @@ -63,7 +63,9 @@ xe_sched_invalidate_job(struct xe_sched_job *job, int threshold) static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched, struct xe_sched_job *job) { + spin_lock(&sched->base.job_list_lock); list_add(&job->drm.list, &sched->base.pending_list); + spin_unlock(&sched->base.job_list_lock); } static inline diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c index 783b09bf3681..1eb791ddc375 100644 --- a/drivers/gpu/drm/xe/xe_gsc.c +++ b/drivers/gpu/drm/xe/xe_gsc.c @@ -261,19 +261,17 @@ static int gsc_upload_and_init(struct xe_gsc *gsc) { struct xe_gt *gt = gsc_to_gt(gsc); struct xe_tile *tile = gt_to_tile(gt); + unsigned int fw_ref; int ret; if (XE_WA(tile->primary_gt, 14018094691)) { - ret = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL); + fw_ref = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL); /* * If the forcewake fails we want to keep going, because the worst * case outcome in failing to apply the WA is that PXP won't work, - * which is not fatal. We still throw a warning so the issue is - * seen if it happens. + * which is not fatal. Forcewake get warns implicitly in case of failure */ - xe_gt_WARN_ON(tile->primary_gt, ret); - xe_gt_mcr_multicast_write(tile->primary_gt, EU_SYSTOLIC_LIC_THROTTLE_CTL_WITH_LOCK, EU_SYSTOLIC_LIC_THROTTLE_CTL_LOCK_BIT); @@ -282,7 +280,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc) ret = gsc_upload(gsc); if (XE_WA(tile->primary_gt, 14018094691)) - xe_force_wake_put(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(tile->primary_gt), fw_ref); if (ret) return ret; @@ -352,6 +350,7 @@ static void gsc_work(struct work_struct *work) struct xe_gsc *gsc = container_of(work, typeof(*gsc), work); struct xe_gt *gt = gsc_to_gt(gsc); struct xe_device *xe = gt_to_xe(gt); + unsigned int fw_ref; u32 actions; int ret; @@ -361,7 +360,7 @@ static void gsc_work(struct work_struct *work) spin_unlock_irq(&gsc->lock); xe_pm_runtime_get(xe); - xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); if (actions & GSC_ACTION_ER_COMPLETE) { ret = gsc_er_complete(gt); @@ -381,7 +380,7 @@ static void gsc_work(struct work_struct *work) xe_gsc_proxy_request_handler(gsc); out: - xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(xe); } @@ -601,7 +600,7 @@ void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p) { struct xe_gt *gt = gsc_to_gt(gsc); struct xe_mmio *mmio = >->mmio; - int err; + unsigned int fw_ref; xe_uc_fw_print(&gsc->fw, p); @@ -610,8 +609,8 @@ void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p) if (!xe_uc_fw_is_enabled(&gsc->fw)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); + if (!fw_ref) return; drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", @@ -622,5 +621,5 @@ void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p) xe_mmio_read32(mmio, HECI_FWSTS5(MTL_GSC_HECI1_BASE)), xe_mmio_read32(mmio, HECI_FWSTS6(MTL_GSC_HECI1_BASE))); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC); + 
xe_force_wake_put(gt_to_fw(gt), fw_ref); } diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c index 6d89c22ae811..fc64b45d324b 100644 --- a/drivers/gpu/drm/xe/xe_gsc_proxy.c +++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c @@ -450,22 +450,21 @@ void xe_gsc_proxy_remove(struct xe_gsc *gsc) { struct xe_gt *gt = gsc_to_gt(gsc); struct xe_device *xe = gt_to_xe(gt); - int err = 0; + unsigned int fw_ref = 0; if (!gsc->proxy.component_added) return; /* disable HECI2 IRQs */ xe_pm_runtime_get(xe); - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); + if (!fw_ref) xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n"); /* try do disable irq even if forcewake failed */ gsc_proxy_irq_toggle(gsc, false); - if (!err) - xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(xe); xe_gsc_wait_for_worker_completion(gsc); diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 1c79660fb086..d6744be01a68 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -77,7 +77,8 @@ struct xe_gt *xe_gt_alloc(struct xe_tile *tile) return ERR_PTR(-ENOMEM); gt->tile = tile; - gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0); + gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", + WQ_MEM_RECLAIM); err = drmm_add_action_or_reset(>_to_xe(gt)->drm, gt_fini, gt); if (err) @@ -97,14 +98,14 @@ void xe_gt_sanitize(struct xe_gt *gt) static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) { + unsigned int fw_ref; u32 reg; - int err; if (!XE_WA(gt, 16023588340)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (WARN_ON(err)) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; if (!xe_gt_is_media_type(gt)) { @@ -114,13 +115,13 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) } xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) { + unsigned int fw_ref; u32 reg; - int err; if (!XE_WA(gt, 16023588340)) return; @@ -128,15 +129,15 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) if (xe_gt_is_media_type(gt)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (WARN_ON(err)) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL); reg &= ~CG_DIS_CNTLBUS; xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } /** @@ -402,11 +403,14 @@ static void dump_pat_on_error(struct xe_gt *gt) static int gt_fw_domain_init(struct xe_gt *gt) { + unsigned int fw_ref; int err, i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) { + err = -ETIMEDOUT; goto err_hw_fence_irq; + } if (!xe_gt_is_media_type(gt)) { err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt); @@ -441,14 +445,12 @@ static int gt_fw_domain_init(struct xe_gt *gt) */ gt->info.gmdid = xe_mmio_read32(>->mmio, GMD_ID); - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); - XE_WARN_ON(err); - + xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; err_force_wake: dump_pat_on_error(gt); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); err_hw_fence_irq: for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) 
xe_hw_fence_irq_finish(>->fence_irq[i]); @@ -458,11 +460,14 @@ err_hw_fence_irq: static int all_fw_domain_init(struct xe_gt *gt) { + unsigned int fw_ref; int err, i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) - goto err_hw_fence_irq; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + err = -ETIMEDOUT; + goto err_force_wake; + } xe_gt_mcr_set_implicit_defaults(gt); xe_reg_sr_apply_mmio(>->reg_sr, gt); @@ -526,14 +531,12 @@ static int all_fw_domain_init(struct xe_gt *gt) if (IS_SRIOV_PF(gt_to_xe(gt))) xe_gt_sriov_pf_init_hw(gt); - err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); - XE_WARN_ON(err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; err_force_wake: - xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); -err_hw_fence_irq: + xe_force_wake_put(gt_to_fw(gt), fw_ref); for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) xe_hw_fence_irq_finish(>->fence_irq[i]); @@ -546,11 +549,12 @@ err_hw_fence_irq: */ int xe_gt_init_hwconfig(struct xe_gt *gt) { + unsigned int fw_ref; int err; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto out; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; xe_gt_mcr_init_early(gt); xe_pat_init(gt); @@ -568,8 +572,7 @@ int xe_gt_init_hwconfig(struct xe_gt *gt) xe_gt_enable_host_l2_vram(gt); out_fw: - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -out: + xe_force_wake_put(gt_to_fw(gt), fw_ref); return err; } @@ -764,6 +767,7 @@ static int do_gt_restart(struct xe_gt *gt) static int gt_reset(struct xe_gt *gt) { + unsigned int fw_ref; int err; if (xe_device_wedged(gt_to_xe(gt))) @@ -784,9 +788,11 @@ static int gt_reset(struct xe_gt *gt) xe_gt_sanitize(gt); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) - goto err_msg; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + err = -ETIMEDOUT; + goto err_out; + } xe_uc_gucrc_disable(>->uc); xe_uc_stop_prepare(>->uc); @@ -804,8 +810,7 @@ static int gt_reset(struct xe_gt *gt) if (err) goto err_out; - err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); - XE_WARN_ON(err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(gt_to_xe(gt)); xe_gt_info(gt, "reset done\n"); @@ -813,8 +818,7 @@ static int gt_reset(struct xe_gt *gt) return 0; err_out: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); -err_msg: + xe_force_wake_put(gt_to_fw(gt), fw_ref); XE_WARN_ON(xe_uc_start(>->uc)); err_fail: xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err)); @@ -834,7 +838,7 @@ static void gt_reset_worker(struct work_struct *w) void xe_gt_reset_async(struct xe_gt *gt) { - xe_gt_info(gt, "trying reset\n"); + xe_gt_info(gt, "trying reset from %ps\n", __builtin_return_address(0)); /* Don't do a reset while one is already in flight */ if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(>->uc)) @@ -846,22 +850,25 @@ void xe_gt_reset_async(struct xe_gt *gt) void xe_gt_suspend_prepare(struct xe_gt *gt) { - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + unsigned int fw_ref; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); xe_uc_stop_prepare(>->uc); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } int xe_gt_suspend(struct xe_gt *gt) { + unsigned int fw_ref; int err; xe_gt_dbg(gt, "suspending\n"); xe_gt_sanitize(gt); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - 
if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) goto err_msg; err = xe_uc_suspend(&gt->uc); @@ -872,14 +879,15 @@ int xe_gt_suspend(struct xe_gt *gt) xe_gt_disable_host_l2_vram(gt); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_dbg(gt, "suspended\n"); return 0; -err_force_wake: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); err_msg: + err = -ETIMEDOUT; +err_force_wake: + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err)); return err; @@ -887,9 +895,11 @@ err_msg: void xe_gt_shutdown(struct xe_gt *gt) { - xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + unsigned int fw_ref; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); do_gt_reset(gt); - xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } /** @@ -914,11 +924,12 @@ int xe_gt_sanitize_freq(struct xe_gt *gt) int xe_gt_resume(struct xe_gt *gt) { + unsigned int fw_ref; int err; xe_gt_dbg(gt, "resuming\n"); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) goto err_msg; err = do_gt_restart(gt); @@ -927,14 +938,15 @@ int xe_gt_resume(struct xe_gt *gt) xe_gt_idle_enable_pg(gt); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_dbg(gt, "resumed\n"); return 0; -err_force_wake: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); err_msg: + err = -ETIMEDOUT; +err_force_wake: + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err)); return err; diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c index 9360ac4de489..b6adfb9f2030 100644 --- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c +++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c @@ -68,6 +68,12 @@ static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines) } } + /* + * Mask bits need to be set for the register. Although only Xe2+ + * platforms require the mask bits to be set, setting them does no harm + * on older platforms, where these bits are unused. 
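+ * + * (Assumed hardware convention, noted here for clarity: CCS_MODE behaves + * as a masked register on Xe2+, where the upper 16 bits select which of + * the lower 16 bits a write actually updates, hence the mask being + * shifted left by 16 below.)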
+ */ + mode |= CCS_MODE_CSLICE_0_3_MASK << 16; xe_mmio_write32(>->mmio, CCS_MODE, mode); xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n", @@ -133,9 +139,10 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr, } /* CCS mode can only be updated when there are no drm clients */ - spin_lock(&xe->clients.lock); - if (xe->clients.count) { - spin_unlock(&xe->clients.lock); + mutex_lock(&xe->drm.filelist_mutex); + if (!list_empty(&xe->drm.filelist)) { + mutex_unlock(&xe->drm.filelist_mutex); + xe_gt_dbg(gt, "Rejecting compute mode change as there are active drm clients\n"); return -EBUSY; } @@ -146,7 +153,7 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr, xe_gt_reset_async(gt); } - spin_unlock(&xe->clients.lock); + mutex_unlock(&xe->drm.filelist_mutex); return count; } diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c index cbc43973ff7e..3e8c351a0eab 100644 --- a/drivers/gpu/drm/xe/xe_gt_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c @@ -90,22 +90,21 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p) struct xe_device *xe = gt_to_xe(gt); struct xe_hw_engine *hwe; enum xe_hw_engine_id id; - int err; + unsigned int fw_ref; xe_pm_runtime_get(xe); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) { + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { xe_pm_runtime_put(xe); - return err; + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return -ETIMEDOUT; } for_each_hw_engine(hwe, gt, id) xe_hw_engine_print(hwe, p); - err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(xe); - if (err) - return err; return 0; } diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c index 746812aee8ff..fd80afeef56a 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.c +++ b/drivers/gpu/drm/xe/xe_gt_idle.c @@ -101,6 +101,7 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt) struct xe_gt_idle *gtidle = >->gtidle; struct xe_mmio *mmio = >->mmio; u32 vcs_mask, vecs_mask; + unsigned int fw_ref; int i, j; if (IS_SRIOV_VF(xe)) @@ -127,7 +128,7 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt) VDN_MFXVDENC_POWERGATE_ENABLE(j)); } - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); if (xe->info.skip_guc_pc) { /* * GuC sets the hysteresis value when GuC PC is enabled @@ -138,12 +139,13 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt) } xe_mmio_write32(mmio, POWERGATE_ENABLE, gtidle->powergate_enable); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } void xe_gt_idle_disable_pg(struct xe_gt *gt) { struct xe_gt_idle *gtidle = >->gtidle; + unsigned int fw_ref; if (IS_SRIOV_VF(gt_to_xe(gt))) return; @@ -151,9 +153,9 @@ void xe_gt_idle_disable_pg(struct xe_gt *gt) xe_device_assert_mem_access(gt_to_xe(gt)); gtidle->powergate_enable = 0; - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); xe_mmio_write32(>->mmio, POWERGATE_ENABLE, gtidle->powergate_enable); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } /** @@ -172,7 +174,8 @@ int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p) enum xe_gt_idle_state state; u32 pg_enabled, pg_status = 0; u32 vcs_mask, vecs_mask; - int err, n; + unsigned int fw_ref; + int n; /* * Media Slices * @@ 
-208,14 +211,14 @@ int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p) /* Do not wake the GT to read powergating status */ if (state != GT_IDLE_C6) { - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - return err; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; pg_enabled = xe_mmio_read32(>->mmio, POWERGATE_ENABLE); pg_status = xe_mmio_read32(>->mmio, POWERGATE_DOMAIN_STATUS); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } if (gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK) { @@ -298,13 +301,14 @@ static void gt_idle_fini(void *arg) { struct kobject *kobj = arg; struct xe_gt *gt = kobj_to_gt(kobj->parent); + unsigned int fw_ref; xe_gt_idle_disable_pg(gt); if (gt_to_xe(gt)->info.skip_guc_pc) { - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); xe_gt_idle_disable_c6(gt); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } sysfs_remove_files(kobj, gt_idle_attrs); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index a863e50b756e..192643d63d22 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -395,6 +395,8 @@ static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node) * the xe_ggtt_clear() called by below xe_ggtt_remove_node(). */ xe_ggtt_node_remove(node, false); + } else { + xe_ggtt_node_fini(node); } } @@ -450,7 +452,7 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size) config->ggtt_region = node; return 0; err: - xe_ggtt_node_fini(node); + pf_release_ggtt(tile, node); return err; } @@ -2377,6 +2379,41 @@ int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p) } /** + * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations. + * @gt: the &xe_gt + * @p: the &drm_printer + * + * Print LMEM allocations across all VFs. + * VFs without LMEM allocation are skipped. + * + * This function can only be called on PF. + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p) +{ + unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt)); + const struct xe_gt_sriov_config *config; + char buf[10]; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + + for (n = 1; n <= total_vfs; n++) { + config = >->sriov.pf.vfs[n].config; + if (!config->lmem_obj) + continue; + + string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2, + buf, sizeof(buf)); + drm_printf(p, "VF%u:\t%zu\t(%s)\n", + n, config->lmem_obj->size, buf); + } + + mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + return 0; +} + +/** * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges. 
* @gt: the &xe_gt * @p: the &drm_printer diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h index b74ec38baa18..0c55aa40a1a7 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h @@ -65,6 +65,7 @@ void xe_gt_sriov_pf_config_restart(struct xe_gt *gt); int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p); int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p); int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p); +int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p); int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c index 91fc42e386d8..05df4ab3514b 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c @@ -82,6 +82,11 @@ static const struct drm_info_list pf_info[] = { .data = xe_gt_sriov_pf_config_print_dbs, }, { + "lmem_provisioned", + .show = xe_gt_debugfs_simple_show, + .data = xe_gt_sriov_pf_config_print_lmem, + }, + { "runtime_registers", .show = xe_gt_debugfs_simple_show, .data = xe_gt_sriov_pf_service_print_runtime, diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index a530a933eedc..3cb228c773cd 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -72,6 +72,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work) struct xe_device *xe = gt_to_xe(gt); struct xe_gt_tlb_invalidation_fence *fence, *next; + LNL_FLUSH_WORK(>->uc.guc.ct.g2h_worker); + spin_lock_irq(>->tlb_invalidation.pending_lock); list_for_each_entry_safe(fence, next, >->tlb_invalidation.pending_fences, link) { @@ -268,6 +270,7 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt, int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); + unsigned int fw_ref; if (xe_guc_ct_enabled(>->uc.guc.ct) && gt->uc.guc.submission_state.enabled) { @@ -286,7 +289,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt) if (IS_SRIOV_VF(xe)) return 0; - xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) { xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1, PVC_GUC_TLB_INV_DESC1_INVALIDATE); @@ -296,7 +299,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt) xe_mmio_write32(mmio, GUC_TLB_INV_CR, GUC_TLB_INV_CR_INVALIDATE); } - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } return 0; diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 8570b1218287..7f704346a8f4 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -70,7 +70,7 @@ static u32 guc_ctl_debug_flags(struct xe_guc *guc) static u32 guc_ctl_feature_flags(struct xe_guc *guc) { - u32 flags = 0; + u32 flags = GUC_CTL_ENABLE_LITE_RESTORE; if (!guc_to_xe(guc)->info.skip_guc_pc) flags |= GUC_CTL_ENABLE_SLPC; @@ -248,10 +248,11 @@ static void guc_fini_hw(void *arg) { struct xe_guc *guc = arg; struct xe_gt *gt = guc_to_gt(guc); + unsigned int fw_ref; - xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); xe_uc_fini_hw(&guc_to_gt(guc)->uc); - 
xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } /** @@ -1155,14 +1156,14 @@ int xe_guc_start(struct xe_guc *guc) void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) { struct xe_gt *gt = guc_to_gt(guc); + unsigned int fw_ref; u32 status; - int err; int i; xe_uc_fw_print(&guc->fw, p); - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; status = xe_mmio_read32(>->mmio, GUC_STATUS); @@ -1183,9 +1184,12 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) i, xe_mmio_read32(>->mmio, SOFT_SCRATCH(i))); } - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); + + drm_puts(p, "\n"); + xe_guc_ct_print(&guc->ct, p, false); - xe_guc_ct_print(&guc->ct, p); + drm_puts(p, "\n"); xe_guc_submit_print(guc, p); } diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c index 25292997c7f3..4e746ae98888 100644 --- a/drivers/gpu/drm/xe/xe_guc_ads.c +++ b/drivers/gpu/drm/xe/xe_guc_ads.c @@ -359,6 +359,11 @@ static void guc_waklv_init(struct xe_guc_ads *ads) GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE, &offset, &remain); + if (XE_WA(gt, 14022866841)) + guc_waklv_enable_simple(ads, + GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO, + &offset, &remain); + /* * On RC6 exit, GuC will write register 0xB04 with the default value provided. As of now, * the default value for this register is determined to be 0xC40. This could change in the diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c index 41262bda20ed..d63912d28246 100644 --- a/drivers/gpu/drm/xe/xe_guc_capture.c +++ b/drivers/gpu/drm/xe/xe_guc_capture.c @@ -102,6 +102,7 @@ struct __guc_capture_parsed_output { * A 64 bit register define requires 2 consecutive entries, * with low dword first and hi dword the second. * 2. Register name: null for incompleted define + * 3. Incorrect order will trigger XE_WARN. 
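+ * + * For illustration (mirroring the examples in the print-side checks + * added below), a correct 64 bit pair looks like: + *	{ XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, + *	{ XXX_REG_HI(0), REG_64BIT_HI_DW, 0, 0, "XXX_REG"},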
*/ #define COMMON_XELP_BASE_GLOBAL \ { FORCEWAKE_GT, REG_32BIT, 0, 0, "FORCEWAKE_GT"} @@ -1531,7 +1532,7 @@ read_reg_to_node(struct xe_hw_engine *hwe, const struct __guc_mmio_reg_descr_gro { int i; - if (!list || list->num_regs == 0) + if (!list || !list->list || list->num_regs == 0) return; if (!regs) @@ -1541,9 +1542,6 @@ read_reg_to_node(struct xe_hw_engine *hwe, const struct __guc_mmio_reg_descr_gro struct __guc_mmio_reg_descr desc = list->list[i]; u32 value; - if (!list->list) - return; - if (list->type == GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE) { value = xe_hw_engine_mmio_read32(hwe, desc.reg); } else { @@ -1590,6 +1588,9 @@ xe_engine_manual_capture(struct xe_hw_engine *hwe, struct xe_hw_engine_snapshot u16 guc_id = 0; u32 lrca = 0; + if (IS_SRIOV_VF(xe)) + return; + new = guc_capture_get_prealloc_node(guc); if (!new) return; @@ -1675,10 +1676,10 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_ struct xe_devcoredump *devcoredump = &xe->devcoredump; struct xe_devcoredump_snapshot *devcore_snapshot = &devcoredump->snapshot; struct gcap_reg_list_info *reginfo = NULL; - u32 last_value, i; - bool is_ext; + u32 i, last_value = 0; + bool is_ext, low32_ready = false; - if (!list || list->num_regs == 0) + if (!list || !list->list || list->num_regs == 0) return; XE_WARN_ON(!devcore_snapshot->matched_node); @@ -1701,29 +1702,75 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_ continue; value = reg->value; - if (reg_desc->data_type == REG_64BIT_LOW_DW) { + switch (reg_desc->data_type) { + case REG_64BIT_LOW_DW: last_value = value; + + /* + * A 64 bit register define requires 2 consecutive + * entries in register list, with low dword first + * and hi dword the second, like: + * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, + * { XXX_REG_HI(0), REG_64BIT_HI_DW, 0, 0, "XXX_REG"}, + * + * Incorrect order will trigger XE_WARN. + * + * Possible double low here, for example: + * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, + * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, + */ + XE_WARN_ON(low32_ready); + low32_ready = true; /* Low 32 bit dword saved, continue for high 32 bit */ - continue; - } else if (reg_desc->data_type == REG_64BIT_HI_DW) { + break; + + case REG_64BIT_HI_DW: { u64 value_qw = ((u64)value << 32) | last_value; + /* + * Incorrect 64bit register order. Possible missing low. + * for example: + * { XXX_REG(0), REG_32BIT, 0, 0, NULL}, + * { XXX_REG_HI(0), REG_64BIT_HI_DW, 0, 0, NULL}, + */ + XE_WARN_ON(!low32_ready); + low32_ready = false; + drm_printf(p, "\t%s: 0x%016llx\n", reg_desc->regname, value_qw); - continue; + break; } - if (is_ext) { - int dss, group, instance; + case REG_32BIT: + /* + * Incorrect 64bit register order. Possible missing high. 
+ * for example: + * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, + * { XXX_REG(0), REG_32BIT, 0, 0, "XXX_REG"}, + */ + XE_WARN_ON(low32_ready); - group = FIELD_GET(GUC_REGSET_STEERING_GROUP, reg_desc->flags); - instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, reg_desc->flags); - dss = xe_gt_mcr_steering_info_to_dss_id(gt, group, instance); + if (is_ext) { + int dss, group, instance; - drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, dss, value); - } else { - drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value); + group = FIELD_GET(GUC_REGSET_STEERING_GROUP, reg_desc->flags); + instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, reg_desc->flags); + dss = xe_gt_mcr_steering_info_to_dss_id(gt, group, instance); + + drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, dss, value); + } else { + drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value); + } + break; } } + + /* + * Incorrect 64bit register order. Possible missing high. + * for example: + * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, + * } // <- Register list end + */ + XE_WARN_ON(low32_ready); } /** @@ -1820,7 +1867,7 @@ xe_guc_capture_get_matching_and_lock(struct xe_sched_job *job) return NULL; xe = gt_to_xe(q->gt); - if (xe->wedged.mode >= 2 || !xe_device_uc_enabled(xe)) + if (xe->wedged.mode >= 2 || !xe_device_uc_enabled(xe) || IS_SRIOV_VF(xe)) return NULL; ss = &xe->devcoredump.snapshot; @@ -1876,6 +1923,9 @@ xe_engine_snapshot_capture_for_job(struct xe_sched_job *job) enum xe_hw_engine_id id; u32 adj_logical_mask = q->logical_mask; + if (IS_SRIOV_VF(xe)) + return; + for_each_hw_engine(hwe, q->gt, id) { if (hwe->class != q->hwe->class || !(BIT(hwe->logical_instance) & adj_logical_mask)) { diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index c7673f56d413..8aeb1789805c 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -213,7 +213,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct) xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE)); - ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", 0); + ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM); if (!ct->g2h_wq) return -ENOMEM; @@ -1018,6 +1018,15 @@ retry_same_fence: ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ); + if (!ret) { + LNL_FLUSH_WORK(&ct->g2h_worker); + if (g2h_fence.done) { + xe_gt_warn(gt, "G2H fence %u, action %04x, done\n", + g2h_fence.seqno, action[0]); + ret = 1; + } + } + /* * Ensure we serialize with completion side to prevent UAF with fence going out of scope on * the stack, since we have no clue if it will fire after the timeout before we can erase @@ -1589,7 +1598,8 @@ static void g2h_worker_func(struct work_struct *w) receive_g2h(ct); } -struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic) +static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic, + bool want_ctb) { struct xe_guc_ct_snapshot *snapshot; @@ -1597,7 +1607,7 @@ struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool a if (!snapshot) return NULL; - if (ct->bo) { + if (ct->bo && want_ctb) { snapshot->ctb_size = ct->bo->size; snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL); } @@ -1627,25 +1637,13 @@ static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot, drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status); } -/** - * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state. - * @ct: GuC CT object. 
- * @atomic: Boolean to indicate if this is called from atomic context like - * reset or CTB handler or from some regular path like debugfs. - * - * This can be printed out in a later stage like during dev_coredump - * analysis. - * - * Returns: a GuC CT snapshot object that must be freed by the caller - * by using `xe_guc_ct_snapshot_free`. - */ -struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, - bool atomic) +static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic, + bool want_ctb) { struct xe_device *xe = ct_to_xe(ct); struct xe_guc_ct_snapshot *snapshot; - snapshot = xe_guc_ct_snapshot_alloc(ct, atomic); + snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb); if (!snapshot) { xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n"); return NULL; @@ -1665,6 +1663,21 @@ struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, } /** + * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state. + * @ct: GuC CT object. + * + * This can be printed out in a later stage like during dev_coredump + * analysis. This is safe to be called during atomic context. + * + * Returns: a GuC CT snapshot object that must be freed by the caller + * by using `xe_guc_ct_snapshot_free`. + */ +struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct) +{ + return guc_ct_snapshot_capture(ct, true, true); +} + +/** * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot. * @snapshot: GuC CT snapshot object. * @p: drm_printer where it will be printed out. @@ -1686,12 +1699,8 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, drm_printf(p, "\tg2h outstanding: %d\n", snapshot->g2h_outstanding); - if (snapshot->ctb) { + if (snapshot->ctb) xe_print_blob_ascii85(p, "CTB data", snapshot->ctb, 0, snapshot->ctb_size); - } else { - drm_printf(p, "CTB snapshot missing!\n"); - return; - } } else { drm_puts(p, "CT disabled\n"); } @@ -1717,14 +1726,16 @@ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot) * xe_guc_ct_print - GuC CT Print. * @ct: GuC CT. * @p: drm_printer where it will be printed out. + * @want_ctb: Should the full CTB content be dumped (vs just the headers) * - * This function quickly capture a snapshot and immediately print it out. + * This function will quickly capture a snapshot of the CT state + * and immediately print it out. 
*/ -void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p) +void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb) { struct xe_guc_ct_snapshot *snapshot; - snapshot = xe_guc_ct_snapshot_capture(ct, false); + snapshot = guc_ct_snapshot_capture(ct, false, want_ctb); xe_guc_ct_snapshot_print(snapshot, p); xe_guc_ct_snapshot_free(snapshot); } @@ -1758,7 +1769,7 @@ static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reaso return; snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true); - snapshot_ct = xe_guc_ct_snapshot_capture((ct), true); + snapshot_ct = xe_guc_ct_snapshot_capture((ct)); spin_lock_irqsave(&ct->dead.lock, flags); diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h index 338f0b75d29f..82c4ae458dda 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.h +++ b/drivers/gpu/drm/xe/xe_guc_ct.h @@ -17,11 +17,10 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct); void xe_guc_ct_stop(struct xe_guc_ct *ct); void xe_guc_ct_fast_path(struct xe_guc_ct *ct); -struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic); -struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic); +struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct); void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_printer *p); void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot); -void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p); +void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb); static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct) { diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c index d3822cbea273..995b306aced7 100644 --- a/drivers/gpu/drm/xe/xe_guc_debugfs.c +++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c @@ -47,9 +47,23 @@ static int guc_log(struct seq_file *m, void *data) return 0; } +static int guc_ctb(struct seq_file *m, void *data) +{ + struct xe_guc *guc = node_to_guc(m->private); + struct xe_device *xe = guc_to_xe(guc); + struct drm_printer p = drm_seq_file_printer(m); + + xe_pm_runtime_get(xe); + xe_guc_ct_print(&guc->ct, &p, true); + xe_pm_runtime_put(xe); + + return 0; +} + static const struct drm_info_list debugfs_list[] = { {"guc_info", guc_info, 0}, {"guc_log", guc_log, 0}, + {"guc_ctb", guc_ctb, 0}, }; void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent) diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h index 01e3ab590c3a..08ffe59f22fa 100644 --- a/drivers/gpu/drm/xe/xe_guc_fwif.h +++ b/drivers/gpu/drm/xe/xe_guc_fwif.h @@ -105,6 +105,7 @@ struct guc_update_exec_queue_policy { #define GUC_CTL_FEATURE 2 #define GUC_CTL_ENABLE_SLPC BIT(2) +#define GUC_CTL_ENABLE_LITE_RESTORE BIT(4) #define GUC_CTL_DISABLE_SCHEDULER BIT(14) #define GUC_CTL_DEBUG 3 diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c index 93921f04153f..df4cfb698cdb 100644 --- a/drivers/gpu/drm/xe/xe_guc_log.c +++ b/drivers/gpu/drm/xe/xe_guc_log.c @@ -122,7 +122,7 @@ void xe_guc_log_snapshot_free(struct xe_guc_log_snapshot *snapshot) if (!snapshot) return; - if (!snapshot->copy) { + if (snapshot->copy) { for (i = 0; i < snapshot->num_chunks; i++) kfree(snapshot->copy[i]); kfree(snapshot->copy); @@ -145,8 +145,9 @@ struct xe_guc_log_snapshot *xe_guc_log_snapshot_capture(struct xe_guc_log *log, struct xe_device *xe = log_to_xe(log); struct xe_guc *guc = log_to_guc(log); struct xe_gt *gt = 
log_to_gt(log); + unsigned int fw_ref; size_t remain; - int i, err; + int i; if (!log->bo) { xe_gt_err(gt, "GuC log buffer not allocated\n"); @@ -168,12 +169,12 @@ struct xe_guc_log_snapshot *xe_guc_log_snapshot_capture(struct xe_guc_log *log, remain -= size; } - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) { - snapshot->stamp = ~0; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) { + snapshot->stamp = ~0ULL; } else { - snapshot->stamp = xe_mmio_read32(>->mmio, GUC_PMTIMESTAMP); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + snapshot->stamp = xe_mmio_read64_2x32(>->mmio, GUC_PMTIMESTAMP_LO); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } snapshot->ktime = ktime_get_boottime_ns(); snapshot->level = log->level; @@ -204,7 +205,7 @@ void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_ snapshot->ver_found.major, snapshot->ver_found.minor, snapshot->ver_found.patch, snapshot->ver_want.major, snapshot->ver_want.minor, snapshot->ver_want.patch); drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", snapshot->ktime, snapshot->ktime); - drm_printf(p, "GuC timestamp: 0x%08X [%u]\n", snapshot->stamp, snapshot->stamp); + drm_printf(p, "GuC timestamp: 0x%08llX [%llu]\n", snapshot->stamp, snapshot->stamp); drm_printf(p, "Log level: %u\n", snapshot->level); remain = snapshot->size; diff --git a/drivers/gpu/drm/xe/xe_guc_log_types.h b/drivers/gpu/drm/xe/xe_guc_log_types.h index 4d57f8322efc..b3d5c72ac752 100644 --- a/drivers/gpu/drm/xe/xe_guc_log_types.h +++ b/drivers/gpu/drm/xe/xe_guc_log_types.h @@ -27,7 +27,7 @@ struct xe_guc_log_snapshot { /** @ktime: Kernel time the snapshot was taken */ u64 ktime; /** @stamp: GuC timestamp at which the snapshot was taken */ - u32 stamp; + u64 stamp; /** @level: GuC log verbosity level */ u32 level; /** @ver_found: GuC firmware version */ diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index 2b654f820ae2..e8b9faeaef64 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -415,22 +415,24 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc) int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq) { struct xe_gt *gt = pc_to_gt(pc); - int ret; + unsigned int fw_ref; /* * GuC SLPC plays with cur freq request when GuCRC is enabled * Block RC6 for a more reliable read. */ - ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (ret) - return ret; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return -ETIMEDOUT; + } *freq = xe_mmio_read32(>->mmio, RPNSWREQ); *freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq); *freq = decode_freq(*freq); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; } @@ -480,6 +482,7 @@ u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc) int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq) { struct xe_gt *gt = pc_to_gt(pc); + unsigned int fw_ref; int ret; mutex_lock(&pc->freq_lock); @@ -493,9 +496,11 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq) * GuC SLPC plays with min freq request when GuCRC is enabled * Block RC6 for a more reliable read. 
*/ - ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (ret) - goto out; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + ret = -ETIMEDOUT; + goto fw; + } ret = pc_action_query_task_state(pc); if (ret) @@ -504,7 +509,7 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq) *freq = pc_get_min_freq(pc); fw: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); out: mutex_unlock(&pc->freq_lock); return ret; @@ -855,6 +860,7 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc) { struct xe_device *xe = pc_to_xe(pc); struct xe_gt *gt = pc_to_gt(pc); + unsigned int fw_ref; int ret = 0; if (xe->info.skip_guc_pc) @@ -864,13 +870,15 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc) if (ret) return ret; - ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (ret) - return ret; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return -ETIMEDOUT; + } xe_gt_idle_disable_c6(gt); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; } @@ -956,13 +964,16 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) struct xe_device *xe = pc_to_xe(pc); struct xe_gt *gt = pc_to_gt(pc); u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); + unsigned int fw_ref; int ret; xe_gt_assert(gt, xe_device_uc_enabled(xe)); - ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (ret) - return ret; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return -ETIMEDOUT; + } if (xe->info.skip_guc_pc) { if (xe->info.platform != XE_PVC) @@ -1005,7 +1016,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL); out: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return ret; } @@ -1037,18 +1048,19 @@ static void xe_guc_pc_fini_hw(void *arg) { struct xe_guc_pc *pc = arg; struct xe_device *xe = pc_to_xe(pc); + unsigned int fw_ref; if (xe_device_wedged(xe)) return; - XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL)); + fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL); xe_guc_pc_gucrc_disable(pc); XE_WARN_ON(xe_guc_pc_stop(pc)); /* Bind requested freq to mert_freq_cap before unload */ pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq)); - xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref); } /** diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index c1ebc693a617..6f4a9812b4f4 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -225,80 +225,11 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q) EXEC_QUEUE_STATE_BANNED)); } -#ifdef CONFIG_PROVE_LOCKING -static int alloc_submit_wq(struct xe_guc *guc) -{ - int i; - - for (i = 0; i < NUM_SUBMIT_WQ; ++i) { - guc->submission_state.submit_wq_pool[i] = - alloc_ordered_workqueue("submit_wq", 0); - if (!guc->submission_state.submit_wq_pool[i]) - goto err_free; - } - - return 0; - -err_free: - while (i) - destroy_workqueue(guc->submission_state.submit_wq_pool[--i]); - - return -ENOMEM; -} - -static void 
free_submit_wq(struct xe_guc *guc) -{ - int i; - - for (i = 0; i < NUM_SUBMIT_WQ; ++i) - destroy_workqueue(guc->submission_state.submit_wq_pool[i]); -} - -static struct workqueue_struct *get_submit_wq(struct xe_guc *guc) -{ - int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ; - - return guc->submission_state.submit_wq_pool[idx]; -} -#else -static int alloc_submit_wq(struct xe_guc *guc) -{ - return 0; -} - -static void free_submit_wq(struct xe_guc *guc) -{ - -} - -static struct workqueue_struct *get_submit_wq(struct xe_guc *guc) -{ - return NULL; -} -#endif - -static void xe_guc_submit_fini(struct xe_guc *guc) -{ - struct xe_device *xe = guc_to_xe(guc); - struct xe_gt *gt = guc_to_gt(guc); - int ret; - - ret = wait_event_timeout(guc->submission_state.fini_wq, - xa_empty(&guc->submission_state.exec_queue_lookup), - HZ * 5); - - drain_workqueue(xe->destroy_wq); - - xe_gt_assert(gt, ret); -} - static void guc_submit_fini(struct drm_device *drm, void *arg) { struct xe_guc *guc = arg; - xe_guc_submit_fini(guc); xa_destroy(&guc->submission_state.exec_queue_lookup); - free_submit_wq(guc); } static void guc_submit_wedged_fini(void *arg) @@ -360,10 +291,6 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids) if (err) return err; - err = alloc_submit_wq(guc); - if (err) - return err; - gt->exec_queue_ops = &guc_exec_queue_ops; xa_init(&guc->submission_state.exec_queue_lookup); @@ -790,6 +717,7 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job) struct xe_exec_queue *q = job->q; struct xe_guc *guc = exec_queue_to_guc(q); struct xe_device *xe = guc_to_xe(guc); + struct dma_fence *fence = NULL; bool lr = xe_exec_queue_is_lr(q); xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) || @@ -807,20 +735,18 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job) if (lr) { xe_sched_job_set_error(job, -EOPNOTSUPP); - return NULL; - } else if (test_and_set_bit(JOB_FLAG_SUBMIT, &job->fence->flags)) { - return job->fence; + dma_fence_put(job->fence); /* Drop ref from xe_sched_job_arm */ } else { - return dma_fence_get(job->fence); + fence = job->fence; } + + return fence; } static void guc_exec_queue_free_job(struct drm_sched_job *drm_job) { struct xe_sched_job *job = to_xe_sched_job(drm_job); - xe_exec_queue_update_run_ticks(job->q); - trace_xe_sched_job_free(job); xe_sched_job_put(job); } @@ -841,17 +767,19 @@ static void disable_scheduling_deregister(struct xe_guc *guc, struct xe_exec_queue *q) { MAKE_SCHED_CONTEXT_ACTION(q, DISABLE); - struct xe_device *xe = guc_to_xe(guc); int ret; set_min_preemption_timeout(guc, q); smp_rmb(); - ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) || - xe_guc_read_stopped(guc), HZ * 5); + ret = wait_event_timeout(guc->ct.wq, + (!exec_queue_pending_enable(q) && + !exec_queue_pending_disable(q)) || + xe_guc_read_stopped(guc), + HZ * 5); if (!ret) { struct xe_gpu_scheduler *sched = &q->guc->sched; - drm_warn(&xe->drm, "Pending enable failed to respond"); + xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n"); xe_sched_submission_start(sched); xe_gt_reset_async(q->gt); xe_sched_tdr_queue_imm(sched); @@ -990,12 +918,22 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w) static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job) { struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q)); - u32 ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]); - u32 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]); + u32 ctx_timestamp, ctx_job_timestamp; u32 timeout_ms = 
q->sched_props.job_timeout_ms; u32 diff; u64 running_time_ms; + if (!xe_sched_job_started(job)) { + xe_gt_warn(gt, "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, not started", + xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job), + q->guc->id); + + return xe_sched_invalidate_job(job, 2); + } + + ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]); + ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]); + /* * Counter wraps at ~223s at the usual 19.2MHz, be paranoid catch * possible overflows with a high timeout. @@ -1098,6 +1036,7 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) struct xe_guc *guc = exec_queue_to_guc(q); const char *process_name = "no process"; struct xe_device *xe = guc_to_xe(guc); + unsigned int fw_ref; int err = -ETIME; pid_t pid = -1; int i = 0; @@ -1105,10 +1044,13 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) /* * TDR has fired before free job worker. Common if exec queue - * immediately closed after last fence signaled. + * immediately closed after last fence signaled. Add back to pending + * list so job can be freed and kick scheduler ensuring free job is not + * lost. */ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) { - guc_exec_queue_free_job(drm_job); + xe_sched_add_pending_job(sched, job); + xe_sched_submission_start(sched); return DRM_GPU_SCHED_STAT_NOMINAL; } @@ -1121,10 +1063,6 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) exec_queue_killed_or_banned_or_wedged(q) || exec_queue_destroyed(q); - /* Job hasn't started, can't be timed out */ - if (!skip_timeout_check && !xe_sched_job_started(job)) - goto rearm; - /* * If devcoredump not captured and GuC capture for the job is not ready * do manual capture first and decide later if we need to use it @@ -1132,12 +1070,13 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) if (!exec_queue_killed(q) && !xe->devcoredump.captured && !xe_guc_capture_get_matching_and_lock(job)) { /* take force wake before engine register manual capture */ - if (xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL)) + fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n"); xe_engine_snapshot_capture_for_job(job); - xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(q->gt), fw_ref); } /* @@ -1162,7 +1101,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) * modifying state */ ret = wait_event_timeout(guc->ct.wq, - !exec_queue_pending_enable(q) || + (!exec_queue_pending_enable(q) && + !exec_queue_pending_disable(q)) || xe_guc_read_stopped(guc), HZ * 5); if (!ret || xe_guc_read_stopped(guc)) goto trigger_reset; @@ -1391,8 +1331,8 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg) if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) && exec_queue_enabled(q)) { - wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING || - xe_guc_read_stopped(guc)); + wait_event(guc->ct.wq, (q->guc->resume_time != RESUME_PENDING || + xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q)); if (!xe_guc_read_stopped(guc)) { s64 since_resume_ms = @@ -1496,8 +1436,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q) timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? 
MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(q->sched_props.job_timeout_ms); err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops, - get_submit_wq(guc), - q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64, + NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64, timeout, guc_to_gt(guc)->ordered_wq, NULL, q->name, gt_to_xe(q->gt)->drm.dev); if (err) @@ -1814,8 +1753,13 @@ void xe_guc_submit_stop(struct xe_guc *guc) mutex_lock(&guc->submission_state.lock); - xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { + /* Prevent redundant attempts to stop parallel queues */ + if (q->guc->id != index) + continue; + guc_exec_queue_stop(guc, q); + } mutex_unlock(&guc->submission_state.lock); @@ -1853,8 +1797,13 @@ int xe_guc_submit_start(struct xe_guc *guc) mutex_lock(&guc->submission_state.lock); atomic_dec(&guc->submission_state.stopped); - xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { + /* Prevent redundant attempts to start parallel queues */ + if (q->guc->id != index) + continue; + guc_exec_queue_start(q); + } mutex_unlock(&guc->submission_state.lock); wake_up_all(&guc->ct.wq); @@ -1920,16 +1869,29 @@ static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q, xe_gt_assert(guc_to_gt(guc), runnable_state == 0); xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q)); - clear_exec_queue_pending_disable(q); if (q->guc->suspend_pending) { suspend_fence_signal(q); + clear_exec_queue_pending_disable(q); } else { if (exec_queue_banned(q) || check_timeout) { smp_wmb(); wake_up_all(&guc->ct.wq); } - if (!check_timeout) + if (!check_timeout && exec_queue_destroyed(q)) { + /* + * Make sure to clear the pending_disable only + * after sampling the destroyed state. We want + * to ensure we don't trigger the unregister too + * early with something intending to only + * disable scheduling. The caller doing the + * destroy must wait for an ongoing + * pending_disable before marking as destroyed. 
+ */ + clear_exec_queue_pending_disable(q); deregister_exec_queue(guc, q); + } else { + clear_exec_queue_pending_disable(q); + } } } } diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h index ebc5a2237648..fa75f57bf5da 100644 --- a/drivers/gpu/drm/xe/xe_guc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_types.h @@ -74,13 +74,6 @@ struct xe_guc { atomic_t stopped; /** @submission_state.lock: protects submission state */ struct mutex lock; -#ifdef CONFIG_PROVE_LOCKING -#define NUM_SUBMIT_WQ 256 - /** @submission_state.submit_wq_pool: submission ordered workqueues pool */ - struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ]; - /** @submission_state.submit_wq_idx: submission ordered workqueue index */ - int submit_wq_idx; -#endif /** @submission_state.enabled: submission is enabled */ bool enabled; /** @submission_state.fini_wq: submit fini wait queue */ diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c index 77c5830309cf..6a846e4cb221 100644 --- a/drivers/gpu/drm/xe/xe_huc.c +++ b/drivers/gpu/drm/xe/xe_huc.c @@ -296,19 +296,19 @@ void xe_huc_sanitize(struct xe_huc *huc) void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p) { struct xe_gt *gt = huc_to_gt(huc); - int err; + unsigned int fw_ref; xe_uc_fw_print(&huc->fw, p); if (!xe_uc_fw_is_enabled(&huc->fw)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; drm_printf(p, "\nHuC status: 0x%08x\n", xe_mmio_read32(>->mmio, HUC_KERNEL_LOAD_INFO)); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index cfd31ae49cc1..1b97d90aadda 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -209,7 +209,8 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, num_entries * XE_PAGE_SIZE, ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_PINNED); + XE_BO_FLAG_PINNED | + XE_BO_FLAG_PAGETABLE); if (IS_ERR(bo)) return PTR_ERR(bo); @@ -1350,6 +1351,7 @@ __xe_migrate_update_pgtables(struct xe_migrate *m, /* For sysmem PTE's, need to map them in our hole.. 
*/ if (!IS_DGFX(xe)) { + u16 pat_index = xe->pat.idx[XE_CACHE_WB]; u32 ptes, ofs; ppgtt_ofs = NUM_KERNEL_PDE - 1; @@ -1409,7 +1411,7 @@ __xe_migrate_update_pgtables(struct xe_migrate *m, pt_bo->update_index = current_update; addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, - XE_CACHE_WB, 0); + pat_index, 0); bb->cs[bb->len++] = lower_32_bits(addr); bb->cs[bb->len++] = upper_32_bits(addr); } diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c index 231d0e86ed83..54d199b5cfb2 100644 --- a/drivers/gpu/drm/xe/xe_mocs.c +++ b/drivers/gpu/drm/xe/xe_mocs.c @@ -774,25 +774,21 @@ void xe_mocs_init(struct xe_gt *gt) void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p) { - struct xe_mocs_info table; - unsigned int flags; - u32 ret; struct xe_device *xe = gt_to_xe(gt); + struct xe_mocs_info table; + unsigned int fw_ref, flags; flags = get_mocs_settings(xe, &table); xe_pm_runtime_get_noresume(xe); - ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - - if (ret) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) goto err_fw; table.ops->dump(&table, flags, gt, p); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); - + xe_force_wake_put(gt_to_fw(gt), fw_ref); err_fw: - xe_assert(xe, !ret); xe_pm_runtime_put(xe); } diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index bbe03db0c401..8dd55798ab31 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -36,11 +36,22 @@ #include "xe_pm.h" #include "xe_sched_job.h" #include "xe_sriov.h" +#include "xe_sync.h" #define DEFAULT_POLL_FREQUENCY_HZ 200 #define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ) #define XE_OA_UNIT_INVALID U32_MAX +enum xe_oa_submit_deps { + XE_OA_SUBMIT_NO_DEPS, + XE_OA_SUBMIT_ADD_DEPS, +}; + +enum xe_oa_user_extn_from { + XE_OA_USER_EXTN_FROM_OPEN, + XE_OA_USER_EXTN_FROM_CONFIG, +}; + struct xe_oa_reg { struct xe_reg addr; u32 value; @@ -70,6 +81,7 @@ struct flex { }; struct xe_oa_open_param { + struct xe_file *xef; u32 oa_unit_id; bool sample; u32 metric_set; @@ -81,6 +93,9 @@ struct xe_oa_open_param { struct xe_exec_queue *exec_q; struct xe_hw_engine *hwe; bool no_preempt; + struct drm_xe_sync __user *syncs_user; + int num_syncs; + struct xe_sync_entry *syncs; }; struct xe_oa_config_bo { @@ -90,6 +105,17 @@ struct xe_oa_config_bo { struct xe_bb *bb; }; +struct xe_oa_fence { + /* @base: dma fence base */ + struct dma_fence base; + /* @lock: lock for the fence */ + spinlock_t lock; + /* @work: work to signal @base */ + struct delayed_work work; + /* @cb: callback to schedule @work */ + struct dma_fence_cb cb; +}; + #define DRM_FMT(x) DRM_XE_OA_FMT_TYPE_##x static const struct xe_oa_format oa_formats[] = { @@ -162,10 +188,10 @@ static struct xe_oa_config *xe_oa_get_oa_config(struct xe_oa *oa, int metrics_se return oa_config; } -static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo) +static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo, struct dma_fence *last_fence) { xe_oa_config_put(oa_bo->oa_config); - xe_bb_free(oa_bo->bb, NULL); + xe_bb_free(oa_bo->bb, last_fence); kfree(oa_bo); } @@ -570,11 +596,11 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait) return ret; } -static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb) +static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa_submit_deps deps, + struct xe_bb *bb) { struct xe_sched_job *job; struct dma_fence *fence; - long timeout; int err = 0; /* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */ @@ 
-584,18 +610,24 @@ static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb) goto exit; } + if (deps == XE_OA_SUBMIT_ADD_DEPS) { + for (int i = 0; i < stream->num_syncs && !err; i++) + err = xe_sync_entry_add_deps(&stream->syncs[i], job); + if (err) { + drm_dbg(&stream->oa->xe->drm, "xe_sync_entry_add_deps err %d\n", err); + goto err_put_job; + } + } + xe_sched_job_arm(job); fence = dma_fence_get(&job->drm.s_fence->finished); xe_sched_job_push(job); - timeout = dma_fence_wait_timeout(fence, false, HZ); - dma_fence_put(fence); - if (timeout < 0) - err = timeout; - else if (!timeout) - err = -ETIME; + return fence; +err_put_job: + xe_sched_job_put(job); exit: - return err; + return ERR_PTR(err); } static void write_cs_mi_lri(struct xe_bb *bb, const struct xe_oa_reg *reg_data, u32 n_regs) @@ -639,7 +671,8 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream) xe_oa_config_put(stream->oa_config); llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node) - free_oa_config_bo(oa_bo); + free_oa_config_bo(oa_bo, stream->last_fence); + dma_fence_put(stream->last_fence); } static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc, @@ -659,6 +692,7 @@ static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc, static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc, const struct flex *flex, u32 count) { + struct dma_fence *fence; struct xe_bb *bb; int err; @@ -670,7 +704,16 @@ static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lr xe_oa_store_flex(stream, lrc, bb, flex, count); - err = xe_oa_submit_bb(stream, bb); + fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb); + if (IS_ERR(fence)) { + err = PTR_ERR(fence); + goto free_bb; + } + xe_bb_free(bb, fence); + dma_fence_put(fence); + + return 0; +free_bb: xe_bb_free(bb, NULL); exit: return err; @@ -678,6 +721,7 @@ exit: static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri) { + struct dma_fence *fence; struct xe_bb *bb; int err; @@ -689,7 +733,16 @@ static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *re write_cs_mi_lri(bb, reg_lri, 1); - err = xe_oa_submit_bb(stream, bb); + fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb); + if (IS_ERR(fence)) { + err = PTR_ERR(fence); + goto free_bb; + } + xe_bb_free(bb, fence); + dma_fence_put(fence); + + return 0; +free_bb: xe_bb_free(bb, NULL); exit: return err; @@ -837,7 +890,7 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream) xe_oa_free_oa_buffer(stream); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); xe_pm_runtime_put(stream->oa->xe); /* Wa_1509372804:pvc: Unset the override of GUCRC mode to enable rc6 */ @@ -845,6 +898,7 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream) xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(>->uc.guc.pc)); xe_oa_free_configs(stream); + xe_file_put(stream->xef); } static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream) @@ -915,11 +969,62 @@ out: return oa_bo; } +static void xe_oa_update_last_fence(struct xe_oa_stream *stream, struct dma_fence *fence) +{ + dma_fence_put(stream->last_fence); + stream->last_fence = dma_fence_get(fence); +} + +static void xe_oa_fence_work_fn(struct work_struct *w) +{ + struct xe_oa_fence *ofence = container_of(w, typeof(*ofence), work.work); + + /* Signal fence to indicate new OA configuration is active */ + dma_fence_signal(&ofence->base); + 
dma_fence_put(&ofence->base); +} + +static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + /* Additional empirical delay needed for NOA programming after registers are written */ +#define NOA_PROGRAM_ADDITIONAL_DELAY_US 500 + + struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb); + + INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn); + queue_delayed_work(system_unbound_wq, &ofence->work, + usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US)); + dma_fence_put(fence); +} + +static const char *xe_oa_get_driver_name(struct dma_fence *fence) +{ + return "xe_oa"; +} + +static const char *xe_oa_get_timeline_name(struct dma_fence *fence) +{ + return "unbound"; +} + +static const struct dma_fence_ops xe_oa_fence_ops = { + .get_driver_name = xe_oa_get_driver_name, + .get_timeline_name = xe_oa_get_timeline_name, +}; + static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config *config) { #define NOA_PROGRAM_ADDITIONAL_DELAY_US 500 struct xe_oa_config_bo *oa_bo; - int err, us = NOA_PROGRAM_ADDITIONAL_DELAY_US; + struct xe_oa_fence *ofence; + int i, err, num_signal = 0; + struct dma_fence *fence; + + ofence = kzalloc(sizeof(*ofence), GFP_KERNEL); + if (!ofence) { + err = -ENOMEM; + goto exit; + } oa_bo = xe_oa_alloc_config_buffer(stream, config); if (IS_ERR(oa_bo)) { @@ -927,11 +1032,50 @@ static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config goto exit; } - err = xe_oa_submit_bb(stream, oa_bo->bb); + /* Emit OA configuration batch */ + fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_ADD_DEPS, oa_bo->bb); + if (IS_ERR(fence)) { + err = PTR_ERR(fence); + goto exit; + } - /* Additional empirical delay needed for NOA programming after registers are written */ - usleep_range(us, 2 * us); + /* Point of no return: initialize and set fence to signal */ + spin_lock_init(&ofence->lock); + dma_fence_init(&ofence->base, &xe_oa_fence_ops, &ofence->lock, 0, 0); + + for (i = 0; i < stream->num_syncs; i++) { + if (stream->syncs[i].flags & DRM_XE_SYNC_FLAG_SIGNAL) + num_signal++; + xe_sync_entry_signal(&stream->syncs[i], &ofence->base); + } + + /* Additional dma_fence_get in case we dma_fence_wait */ + if (!num_signal) + dma_fence_get(&ofence->base); + + /* Update last fence too before adding callback */ + xe_oa_update_last_fence(stream, fence); + + /* Add job fence callback to schedule work to signal ofence->base */ + err = dma_fence_add_callback(fence, &ofence->cb, xe_oa_config_cb); + xe_gt_assert(stream->gt, !err || err == -ENOENT); + if (err == -ENOENT) + xe_oa_config_cb(fence, &ofence->cb); + + /* If nothing needs to be signaled we wait synchronously */ + if (!num_signal) { + dma_fence_wait(&ofence->base, false); + dma_fence_put(&ofence->base); + } + + /* Done with syncs */ + for (i = 0; i < stream->num_syncs; i++) + xe_sync_entry_cleanup(&stream->syncs[i]); + kfree(stream->syncs); + + return 0; exit: + kfree(ofence); return err; } @@ -1003,6 +1147,262 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream) return xe_oa_emit_oa_config(stream, stream->oa_config); } +static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name) +{ + u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt); + u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt); + u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt); + u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt); + int idx; + + for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) { + const struct 
xe_oa_format *f = &oa->oa_formats[idx]; + + if (counter_size == f->counter_size && bc_report == f->bc_report && + type == f->type && counter_sel == f->counter_select) { + *name = idx; + return 0; + } + } + + return -EINVAL; +} + +static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + if (value >= oa->oa_unit_ids) { + drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value); + return -EINVAL; + } + param->oa_unit_id = value; + return 0; +} + +static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->sample = value; + return 0; +} + +static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->metric_set = value; + return 0; +} + +static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + int ret = decode_oa_format(oa, value, ¶m->oa_format); + + if (ret) { + drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value); + return ret; + } + return 0; +} + +static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ +#define OA_EXPONENT_MAX 31 + + if (value > OA_EXPONENT_MAX) { + drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX); + return -EINVAL; + } + param->period_exponent = value; + return 0; +} + +static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->disabled = value; + return 0; +} + +static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->exec_queue_id = value; + return 0; +} + +static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->engine_instance = value; + return 0; +} + +static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->no_preempt = value; + return 0; +} + +static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->num_syncs = value; + return 0; +} + +static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->syncs_user = u64_to_user_ptr(value); + return 0; +} + +static int xe_oa_set_prop_ret_inval(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + return -EINVAL; +} + +typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param); +static const xe_oa_set_property_fn xe_oa_set_property_funcs_open[] = { + [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id, + [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa, + [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set, + [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format, + [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent, + [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled, + [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id, + [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance, + [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt, + [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs, + [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user, +}; + +static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = { + [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_ret_inval, + 
[DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set, + [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs, + [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user, +}; + +static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_from from, + u64 extension, struct xe_oa_open_param *param) +{ + u64 __user *address = u64_to_user_ptr(extension); + struct drm_xe_ext_set_property ext; + int err; + u32 idx; + + err = __copy_from_user(&ext, address, sizeof(ext)); + if (XE_IOCTL_DBG(oa->xe, err)) + return -EFAULT; + + BUILD_BUG_ON(ARRAY_SIZE(xe_oa_set_property_funcs_open) != + ARRAY_SIZE(xe_oa_set_property_funcs_config)); + + if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) || + XE_IOCTL_DBG(oa->xe, ext.pad)) + return -EINVAL; + + idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open)); + + if (from == XE_OA_USER_EXTN_FROM_CONFIG) + return xe_oa_set_property_funcs_config[idx](oa, ext.value, param); + else + return xe_oa_set_property_funcs_open[idx](oa, ext.value, param); +} + +typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, enum xe_oa_user_extn_from from, + u64 extension, struct xe_oa_open_param *param); +static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = { + [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property, +}; + +#define MAX_USER_EXTENSIONS 16 +static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from from, u64 extension, + int ext_number, struct xe_oa_open_param *param) +{ + u64 __user *address = u64_to_user_ptr(extension); + struct drm_xe_user_extension ext; + int err; + u32 idx; + + if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS)) + return -E2BIG; + + err = __copy_from_user(&ext, address, sizeof(ext)); + if (XE_IOCTL_DBG(oa->xe, err)) + return -EFAULT; + + if (XE_IOCTL_DBG(oa->xe, ext.pad) || + XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs))) + return -EINVAL; + + idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs)); + err = xe_oa_user_extension_funcs[idx](oa, from, extension, param); + if (XE_IOCTL_DBG(oa->xe, err)) + return err; + + if (ext.next_extension) + return xe_oa_user_extensions(oa, from, ext.next_extension, ++ext_number, param); + + return 0; +} + +static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param) +{ + int ret, num_syncs, num_ufence = 0; + + if (param->num_syncs && !param->syncs_user) { + drm_dbg(&oa->xe->drm, "num_syncs specified without sync array\n"); + ret = -EINVAL; + goto exit; + } + + if (param->num_syncs) { + param->syncs = kcalloc(param->num_syncs, sizeof(*param->syncs), GFP_KERNEL); + if (!param->syncs) { + ret = -ENOMEM; + goto exit; + } + } + + for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) { + ret = xe_sync_entry_parse(oa->xe, param->xef, ¶m->syncs[num_syncs], + ¶m->syncs_user[num_syncs], 0); + if (ret) + goto err_syncs; + + if (xe_sync_is_ufence(¶m->syncs[num_syncs])) + num_ufence++; + } + + if (XE_IOCTL_DBG(oa->xe, num_ufence > 1)) { + ret = -EINVAL; + goto err_syncs; + } + + return 0; + +err_syncs: + while (num_syncs--) + 
xe_sync_entry_cleanup(¶m->syncs[num_syncs]); + kfree(param->syncs); +exit: + return ret; +} + static void xe_oa_stream_enable(struct xe_oa_stream *stream) { stream->pollin = false; @@ -1096,36 +1496,38 @@ static int xe_oa_disable_locked(struct xe_oa_stream *stream) static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg) { - struct drm_xe_ext_set_property ext; + struct xe_oa_open_param param = {}; long ret = stream->oa_config->id; struct xe_oa_config *config; int err; - err = __copy_from_user(&ext, u64_to_user_ptr(arg), sizeof(ext)); - if (XE_IOCTL_DBG(stream->oa->xe, err)) - return -EFAULT; - - if (XE_IOCTL_DBG(stream->oa->xe, ext.pad) || - XE_IOCTL_DBG(stream->oa->xe, ext.base.name != DRM_XE_OA_EXTENSION_SET_PROPERTY) || - XE_IOCTL_DBG(stream->oa->xe, ext.base.next_extension) || - XE_IOCTL_DBG(stream->oa->xe, ext.property != DRM_XE_OA_PROPERTY_OA_METRIC_SET)) - return -EINVAL; + err = xe_oa_user_extensions(stream->oa, XE_OA_USER_EXTN_FROM_CONFIG, arg, 0, ¶m); + if (err) + return err; - config = xe_oa_get_oa_config(stream->oa, ext.value); + config = xe_oa_get_oa_config(stream->oa, param.metric_set); if (!config) return -ENODEV; - if (config != stream->oa_config) { - err = xe_oa_emit_oa_config(stream, config); - if (!err) - config = xchg(&stream->oa_config, config); - else - ret = err; + param.xef = stream->xef; + err = xe_oa_parse_syncs(stream->oa, ¶m); + if (err) + goto err_config_put; + + stream->num_syncs = param.num_syncs; + stream->syncs = param.syncs; + + err = xe_oa_emit_oa_config(stream, config); + if (!err) { + config = xchg(&stream->oa_config, config); + drm_dbg(&stream->oa->xe->drm, "changed to oa config uuid=%s\n", + stream->oa_config->uuid); } +err_config_put: xe_oa_config_put(config); - return ret; + return err ?: ret; } static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg) @@ -1212,9 +1614,11 @@ static int xe_oa_release(struct inode *inode, struct file *file) struct xe_oa_stream *stream = file->private_data; struct xe_gt *gt = stream->gt; + xe_pm_runtime_get(gt_to_xe(gt)); mutex_lock(>->oa.gt_lock); xe_oa_destroy_locked(stream); mutex_unlock(>->oa.gt_lock); + xe_pm_runtime_put(gt_to_xe(gt)); /* Release the reference the OA stream kept on the driver */ drm_dev_put(>_to_xe(gt)->drm); @@ -1353,6 +1757,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, { struct xe_oa_unit *u = param->hwe->oa_unit; struct xe_gt *gt = param->hwe->gt; + unsigned int fw_ref; int ret; stream->exec_q = param->exec_q; @@ -1366,6 +1771,10 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, stream->period_exponent = param->period_exponent; stream->no_preempt = param->no_preempt; + stream->xef = xe_file_get(param->xef); + stream->num_syncs = param->num_syncs; + stream->syncs = param->syncs; + /* * For Xe2+, when overrun mode is enabled, there are no partial reports at the end * of buffer, making the OA buffer effectively a non-power-of-2 size circular @@ -1413,7 +1822,11 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, /* Take runtime pm ref and forcewake to disable RC6 */ xe_pm_runtime_get(stream->oa->xe); - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + ret = -ETIMEDOUT; + goto err_fw_put; + } ret = xe_oa_alloc_oa_buffer(stream); if (ret) @@ -1455,13 +1868,14 @@ err_put_k_exec_q: err_free_oa_buf: xe_oa_free_oa_buffer(stream); err_fw_put: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), 
XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(stream->oa->xe); if (stream->override_gucrc) xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(>->uc.guc.pc)); err_free_configs: xe_oa_free_configs(stream); exit: + xe_file_put(stream->xef); return ret; } @@ -1571,27 +1985,6 @@ static bool engine_supports_oa_format(const struct xe_hw_engine *hwe, int type) } } -static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name) -{ - u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt); - u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt); - u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt); - u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt); - int idx; - - for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) { - const struct xe_oa_format *f = &oa->oa_formats[idx]; - - if (counter_size == f->counter_size && bc_report == f->bc_report && - type == f->type && counter_sel == f->counter_select) { - *name = idx; - return 0; - } - } - - return -EINVAL; -} - /** * xe_oa_unit_id - Return OA unit ID for a hardware engine * @hwe: @xe_hw_engine @@ -1638,155 +2031,6 @@ out: return ret; } -static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - if (value >= oa->oa_unit_ids) { - drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value); - return -EINVAL; - } - param->oa_unit_id = value; - return 0; -} - -static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->sample = value; - return 0; -} - -static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->metric_set = value; - return 0; -} - -static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - int ret = decode_oa_format(oa, value, ¶m->oa_format); - - if (ret) { - drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value); - return ret; - } - return 0; -} - -static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ -#define OA_EXPONENT_MAX 31 - - if (value > OA_EXPONENT_MAX) { - drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX); - return -EINVAL; - } - param->period_exponent = value; - return 0; -} - -static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->disabled = value; - return 0; -} - -static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->exec_queue_id = value; - return 0; -} - -static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->engine_instance = value; - return 0; -} - -static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->no_preempt = value; - return 0; -} - -typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param); -static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = { - [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id, - [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa, - [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set, - [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format, - [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent, - [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled, - 
[DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id, - [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance, - [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt, -}; - -static int xe_oa_user_ext_set_property(struct xe_oa *oa, u64 extension, - struct xe_oa_open_param *param) -{ - u64 __user *address = u64_to_user_ptr(extension); - struct drm_xe_ext_set_property ext; - int err; - u32 idx; - - err = __copy_from_user(&ext, address, sizeof(ext)); - if (XE_IOCTL_DBG(oa->xe, err)) - return -EFAULT; - - if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs)) || - XE_IOCTL_DBG(oa->xe, ext.pad)) - return -EINVAL; - - idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs)); - return xe_oa_set_property_funcs[idx](oa, ext.value, param); -} - -typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, u64 extension, - struct xe_oa_open_param *param); -static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = { - [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property, -}; - -#define MAX_USER_EXTENSIONS 16 -static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number, - struct xe_oa_open_param *param) -{ - u64 __user *address = u64_to_user_ptr(extension); - struct drm_xe_user_extension ext; - int err; - u32 idx; - - if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS)) - return -E2BIG; - - err = __copy_from_user(&ext, address, sizeof(ext)); - if (XE_IOCTL_DBG(oa->xe, err)) - return -EFAULT; - - if (XE_IOCTL_DBG(oa->xe, ext.pad) || - XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs))) - return -EINVAL; - - idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs)); - err = xe_oa_user_extension_funcs[idx](oa, extension, param); - if (XE_IOCTL_DBG(oa->xe, err)) - return err; - - if (ext.next_extension) - return xe_oa_user_extensions(oa, ext.next_extension, ++ext_number, param); - - return 0; -} - /** * xe_oa_stream_open_ioctl - Opens an OA stream * @dev: @drm_device @@ -1812,7 +2056,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f return -ENODEV; } - ret = xe_oa_user_extensions(oa, data, 0, ¶m); + param.xef = xef; + ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, ¶m); if (ret) return ret; @@ -1880,11 +2125,24 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f drm_dbg(&oa->xe->drm, "Using periodic sampling freq %lld Hz\n", oa_freq_hz); } + ret = xe_oa_parse_syncs(oa, ¶m); + if (ret) + goto err_exec_q; + mutex_lock(¶m.hwe->gt->oa.gt_lock); ret = xe_oa_stream_open_ioctl_locked(oa, ¶m); mutex_unlock(¶m.hwe->gt->oa.gt_lock); + if (ret < 0) + goto err_sync_cleanup; + + return ret; + +err_sync_cleanup: + while (param.num_syncs--) + xe_sync_entry_cleanup(¶m.syncs[param.num_syncs]); + kfree(param.syncs); err_exec_q: - if (ret < 0 && param.exec_q) + if (param.exec_q) xe_exec_queue_put(param.exec_q); return ret; } diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h index 8862eca73fbe..fea9d981e414 100644 --- a/drivers/gpu/drm/xe/xe_oa_types.h +++ b/drivers/gpu/drm/xe/xe_oa_types.h @@ -238,5 +238,17 @@ struct xe_oa_stream { /** @no_preempt: Whether preemption and timeslicing is disabled for stream exec_q */ u32 no_preempt; + + /** @xef: xe_file with which the stream was opened */ + struct xe_file *xef; + + /** @last_fence: fence to use in stream destroy when needed */ + struct dma_fence *last_fence; + + /** @num_syncs: size of @syncs array 
*/ + u32 num_syncs; + + /** @syncs: syncs to wait on and to signal */ + struct xe_sync_entry *syncs; }; #endif diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c index b16473818173..30fdbdb9341e 100644 --- a/drivers/gpu/drm/xe/xe_pat.c +++ b/drivers/gpu/drm/xe/xe_pat.c @@ -182,11 +182,12 @@ static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry ta static void xelp_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; drm_printf(p, "PAT table:\n"); @@ -198,9 +199,7 @@ static void xelp_dump(struct xe_gt *gt, struct drm_printer *p) XELP_MEM_TYPE_STR_MAP[mem_type], pat); } - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static const struct xe_pat_ops xelp_pat_ops = { @@ -211,11 +210,12 @@ static const struct xe_pat_ops xelp_pat_ops = { static void xehp_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; drm_printf(p, "PAT table:\n"); @@ -229,9 +229,7 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p) XELP_MEM_TYPE_STR_MAP[mem_type], pat); } - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static const struct xe_pat_ops xehp_pat_ops = { @@ -242,11 +240,12 @@ static const struct xe_pat_ops xehp_pat_ops = { static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; drm_printf(p, "PAT table:\n"); @@ -258,9 +257,7 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p) REG_FIELD_GET(XEHPC_CLOS_LEVEL_MASK, pat), pat); } - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static const struct xe_pat_ops xehpc_pat_ops = { @@ -271,11 +268,12 @@ static const struct xe_pat_ops xehpc_pat_ops = { static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; drm_printf(p, "PAT table:\n"); @@ -292,9 +290,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p) REG_FIELD_GET(XELPG_INDEX_COH_MODE_MASK, pat), pat); } - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } /* @@ -330,12 +326,13 @@ static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry static void xe2_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; u32 pat; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; 
drm_printf(p, "PAT table:\n"); @@ -374,9 +371,7 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p) REG_FIELD_GET(XE2_COH_MODE, pat), pat); - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static const struct xe_pat_ops xe2_pat_ops = { diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index e6640283893f..6b7f77425c7f 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -174,7 +174,7 @@ static const struct xe_graphics_desc graphics_xelpg = { GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0) static const struct xe_graphics_desc graphics_xe2 = { - .name = "Xe2_LPG / Xe2_HPG", + .name = "Xe2_LPG / Xe2_HPG / Xe3_LPG", XE2_GFX_FEATURES, }; diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 158629971eab..170ae72d1a7b 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -85,16 +85,13 @@ static __ktime_func_t __clock_id_to_func(clockid_t clk_id) } static void -__read_timestamps(struct xe_gt *gt, - struct xe_reg lower_reg, - struct xe_reg upper_reg, - u64 *engine_ts, - u64 *cpu_ts, - u64 *cpu_delta, - __ktime_func_t cpu_clock) +hwe_read_timestamp(struct xe_hw_engine *hwe, u64 *engine_ts, u64 *cpu_ts, + u64 *cpu_delta, __ktime_func_t cpu_clock) { - struct xe_mmio *mmio = >->mmio; + struct xe_mmio *mmio = &hwe->gt->mmio; u32 upper, lower, old_upper, loop = 0; + struct xe_reg upper_reg = RING_TIMESTAMP_UDW(hwe->mmio_base), + lower_reg = RING_TIMESTAMP(hwe->mmio_base); upper = xe_mmio_read32(mmio, upper_reg); do { @@ -120,6 +117,7 @@ query_engine_cycles(struct xe_device *xe, __ktime_func_t cpu_clock; struct xe_hw_engine *hwe; struct xe_gt *gt; + unsigned int fw_ref; if (query->size == 0) { query->size = size; @@ -152,31 +150,27 @@ query_engine_cycles(struct xe_device *xe, if (!hwe) return -EINVAL; - if (xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + xe_force_wake_put(gt_to_fw(gt), fw_ref); return -EIO; + } - __read_timestamps(gt, - RING_TIMESTAMP(hwe->mmio_base), - RING_TIMESTAMP_UDW(hwe->mmio_base), - &resp.engine_cycles, - &resp.cpu_timestamp, - &resp.cpu_delta, - cpu_clock); + hwe_read_timestamp(hwe, &resp.engine_cycles, &resp.cpu_timestamp, + &resp.cpu_delta, cpu_clock); - xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); - resp.width = 36; + xe_force_wake_put(gt_to_fw(gt), fw_ref); - /* Only write to the output fields of user query */ - if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp)) - return -EFAULT; - - if (put_user(resp.cpu_delta, &query_ptr->cpu_delta)) - return -EFAULT; - - if (put_user(resp.engine_cycles, &query_ptr->engine_cycles)) - return -EFAULT; + if (GRAPHICS_VER(xe) >= 20) + resp.width = 64; + else + resp.width = 36; - if (put_user(resp.width, &query_ptr->width)) + /* Only write to the output fields of user query */ + if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp) || + put_user(resp.cpu_delta, &query_ptr->cpu_delta) || + put_user(resp.engine_cycles, &query_ptr->engine_cycles) || + put_user(resp.width, &query_ptr->width)) return -EFAULT; return 0; @@ -676,7 +670,7 @@ static int query_oa_units(struct xe_device *xe, du->oa_unit_id = u->oa_unit_id; du->oa_unit_type = u->type; du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt); - du->capabilities = DRM_XE_OA_CAPS_BASE; + du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS; j = 0; 
for_each_hw_engine(hwe, gt, hwe_id) { diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c index 191cb4121acd..e1a0e27cda14 100644 --- a/drivers/gpu/drm/xe/xe_reg_sr.c +++ b/drivers/gpu/drm/xe/xe_reg_sr.c @@ -188,27 +188,27 @@ void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt) { struct xe_reg_sr_entry *entry; unsigned long reg; - int err; + unsigned int fw_ref; if (xa_empty(&sr->xa)) return; xe_gt_dbg(gt, "Applying %s save-restore MMIOs\n", sr->name); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) goto err_force_wake; xa_for_each(&sr->xa, reg, entry) apply_one_mmio(gt, entry); - err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); - XE_WARN_ON(err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return; err_force_wake: - xe_gt_err(gt, "Failed to apply, err=%d\n", err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); + xe_gt_err(gt, "Failed to apply, err=-ETIMEDOUT\n"); } void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe) @@ -221,15 +221,15 @@ void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe) u32 mmio_base = hwe->mmio_base; unsigned long reg; unsigned int slot = 0; - int err; + unsigned int fw_ref; if (xa_empty(&sr->xa)) return; drm_dbg(&xe->drm, "Whitelisting %s registers\n", sr->name); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) goto err_force_wake; p = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL); @@ -254,13 +254,13 @@ void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe) xe_mmio_write32(>->mmio, RING_FORCE_TO_NONPRIV(mmio_base, slot), addr); } - err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); - XE_WARN_ON(err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return; err_force_wake: - drm_err(&xe->drm, "Failed to apply, err=%d\n", err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); + drm_err(&xe->drm, "Failed to apply, err=-ETIMEDOUT\n"); } /** diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c index eeccc1c318ae..1905ca590965 100644 --- a/drivers/gpu/drm/xe/xe_sched_job.c +++ b/drivers/gpu/drm/xe/xe_sched_job.c @@ -280,7 +280,7 @@ void xe_sched_job_arm(struct xe_sched_job *job) fence = &chain->base; } - job->fence = fence; + job->fence = dma_fence_get(fence); /* Pairs with put in scheduler */ drm_sched_job_arm(&job->drm); } diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h index 0d3f76fb05ce..f13f333f00be 100644 --- a/drivers/gpu/drm/xe/xe_sched_job_types.h +++ b/drivers/gpu/drm/xe/xe_sched_job_types.h @@ -40,7 +40,6 @@ struct xe_sched_job { * @fence: dma fence to indicate completion. 1 way relationship - job * can safely reference fence, fence cannot safely reference job. 
*/ -#define JOB_FLAG_SUBMIT DMA_FENCE_FLAG_USER_BITS struct dma_fence *fence; /** @user_fence: write back value when BB is complete */ struct { @@ -63,7 +62,7 @@ struct xe_sched_job { struct xe_sched_job_snapshot { u16 batch_addr_len; - u64 batch_addr[]; + u64 batch_addr[] __counted_by(batch_addr_len); }; #endif diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c index bb3c2a830362..42f5bebd09e5 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -54,11 +54,12 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr, { struct xe_user_fence *ufence; u64 __user *ptr = u64_to_user_ptr(addr); + u64 __maybe_unused prefetch_val; - if (!access_ok(ptr, sizeof(*ptr))) + if (get_user(prefetch_val, ptr)) return ERR_PTR(-EFAULT); - ufence = kmalloc(sizeof(*ufence), GFP_KERNEL); + ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); if (!ufence) return ERR_PTR(-ENOMEM); @@ -82,10 +83,16 @@ static void user_fence_worker(struct work_struct *w) XE_WARN_ON("Copy to user failed"); kthread_unuse_mm(ufence->mm); mmput(ufence->mm); + } else { + drm_dbg(&ufence->xe->drm, "mmget_not_zero() failed, ufence wasn't signaled\n"); } - wake_up_all(&ufence->xe->ufence_wq); + /* + * Wake up waiters only after updating the ufence state, allowing the UMD + * to safely reuse the same ufence without encountering -EBUSY errors. + */ WRITE_ONCE(ufence->signalled, 1); + wake_up_all(&ufence->xe->ufence_wq); user_fence_put(ufence); } diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c index 2a623bfcda7e..b1f81dca610d 100644 --- a/drivers/gpu/drm/xe/xe_vram.c +++ b/drivers/gpu/drm/xe/xe_vram.c @@ -220,8 +220,8 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size, { struct xe_device *xe = tile_to_xe(tile); struct xe_gt *gt = tile->primary_gt; + unsigned int fw_ref; u64 offset; - int err; u32 reg; if (IS_SRIOV_VF(xe)) { @@ -240,9 +240,9 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size, return 0; } - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - return err; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; /* actual size */ if (unlikely(xe->info.platform == XE_DG1)) { @@ -264,7 +264,9 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size, /* remove the tile offset so we have just the available size */ *vram_size = offset - *tile_offset; - return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); + + return 0; } static void vram_fini(void *arg) diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 0c90438e8e72..02cf647f86d8 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -602,6 +602,11 @@ static const struct xe_rtp_entry_sr engine_was[] = { XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001), FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE)) }, + { XE_RTP_NAME("18034896535"), + XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0), + FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_ACTIONS(SET(ROW_CHICKEN4, DISABLE_TDL_PUSH)) + }, {} }; diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules index 264d6e116499..bcd04464b85e 100644 --- a/drivers/gpu/drm/xe/xe_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_wa_oob.rules @@ -39,3 +39,5 @@ 14019789679 GRAPHICS_VERSION(1255) GRAPHICS_VERSION_RANGE(1270, 2004) no_media_l3 MEDIA_VERSION(3000) +14022866841 GRAPHICS_VERSION(3000), 
GRAPHICS_STEP(A0, B0) + MEDIA_VERSION(3000), MEDIA_STEP(A0, B0) diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c index d46fa8374980..5b4264ea38bd 100644 --- a/drivers/gpu/drm/xe/xe_wait_user_fence.c +++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c @@ -155,6 +155,13 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data, } if (!timeout) { + LNL_FLUSH_WORKQUEUE(xe->ordered_wq); + err = do_compare(addr, args->value, args->mask, + args->op); + if (err <= 0) { + drm_dbg(&xe->drm, "LNL_FLUSH_WORKQUEUE resolved ufence timeout\n"); + break; + } err = -ETIME; break; } @@ -169,9 +176,6 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data, args->timeout = 0; } - if (!timeout && !(err < 0)) - err = -ETIME; - if (q) xe_exec_queue_put(q); diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c index 9368acf56eaf..e4e0e299e8a7 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_disp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c @@ -1200,6 +1200,9 @@ static void zynqmp_disp_layer_release_dma(struct zynqmp_disp *disp, { unsigned int i; + if (!layer->info) + return; + for (i = 0; i < layer->info->num_channels; i++) { struct zynqmp_disp_layer_dma *dma = &layer->dmas[i]; diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c index 129beac4c073..25c5dc61ee88 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c @@ -18,7 +18,9 @@ #include <drm/drm_modes.h> #include <drm/drm_of.h> +#include <linux/bitfield.h> #include <linux/clk.h> +#include <linux/debugfs.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/io.h> @@ -51,6 +53,7 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)"); #define ZYNQMP_DP_LANE_COUNT_SET 0x4 #define ZYNQMP_DP_ENHANCED_FRAME_EN 0x8 #define ZYNQMP_DP_TRAINING_PATTERN_SET 0xc +#define ZYNQMP_DP_LINK_QUAL_PATTERN_SET 0x10 #define ZYNQMP_DP_SCRAMBLING_DISABLE 0x14 #define ZYNQMP_DP_DOWNSPREAD_CTL 0x18 #define ZYNQMP_DP_SOFTWARE_RESET 0x1c @@ -64,6 +67,9 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)"); ZYNQMP_DP_SOFTWARE_RESET_STREAM3 | \ ZYNQMP_DP_SOFTWARE_RESET_STREAM4 | \ ZYNQMP_DP_SOFTWARE_RESET_AUX) +#define ZYNQMP_DP_COMP_PATTERN_80BIT_1 0x20 +#define ZYNQMP_DP_COMP_PATTERN_80BIT_2 0x24 +#define ZYNQMP_DP_COMP_PATTERN_80BIT_3 0x28 /* Core enable registers */ #define ZYNQMP_DP_TRANSMITTER_ENABLE 0x80 @@ -207,6 +213,7 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)"); #define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_2 BIT(2) #define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_3 BIT(3) #define ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL 0xf +#define ZYNQMP_DP_TRANSMIT_PRBS7 0x230 #define ZYNQMP_DP_PHY_PRECURSOR_LANE_0 0x23c #define ZYNQMP_DP_PHY_PRECURSOR_LANE_1 0x240 #define ZYNQMP_DP_PHY_PRECURSOR_LANE_2 0x244 @@ -275,30 +282,108 @@ struct zynqmp_dp_config { }; /** + * enum test_pattern - Test patterns for link testing + * @TEST_VIDEO: Use regular video input + * @TEST_SYMBOL_ERROR: Symbol error measurement pattern + * @TEST_PRBS7: Output of the PRBS7 (x^7 + x^6 + 1) polynomial + * @TEST_80BIT_CUSTOM: A custom 80-bit pattern + * @TEST_CP2520: HBR2 compliance eye pattern + * @TEST_TPS1: Link training symbol pattern TPS1 (/D10.2/) + * @TEST_TPS2: Link training symbol pattern TPS2 + * @TEST_TPS3: Link training symbol pattern TPS3 (for HBR2) + */ +enum test_pattern { + TEST_VIDEO, + TEST_TPS1, + TEST_TPS2, + TEST_TPS3, + TEST_SYMBOL_ERROR, + TEST_PRBS7, + TEST_80BIT_CUSTOM, +
TEST_CP2520, +}; + +static const char *const test_pattern_str[] = { + [TEST_VIDEO] = "video", + [TEST_TPS1] = "tps1", + [TEST_TPS2] = "tps2", + [TEST_TPS3] = "tps3", + [TEST_SYMBOL_ERROR] = "symbol-error", + [TEST_PRBS7] = "prbs7", + [TEST_80BIT_CUSTOM] = "80bit-custom", + [TEST_CP2520] = "cp2520", +}; + +/** + * struct zynqmp_dp_test - Configuration for test mode + * @pattern: The test pattern + * @enhanced: Use enhanced framing + * @downspread: Use SSC + * @active: Whether test mode is active + * @custom: Custom pattern for %TEST_80BIT_CUSTOM + * @train_set: Voltage/preemphasis settings + * @bw_code: Bandwidth code for the link + * @link_cnt: Number of lanes + */ +struct zynqmp_dp_test { + enum test_pattern pattern; + bool enhanced, downspread, active; + u8 custom[10]; + u8 train_set[ZYNQMP_DP_MAX_LANES]; + u8 bw_code; + u8 link_cnt; +}; + +/** + * struct zynqmp_dp_train_set_priv - Private data for train_set debugfs files + * @dp: DisplayPort IP core structure + * @lane: The lane for this file + */ +struct zynqmp_dp_train_set_priv { + struct zynqmp_dp *dp; + int lane; +}; + +/** * struct zynqmp_dp - Xilinx DisplayPort core * @dev: device structure * @dpsub: Display subsystem * @iomem: device I/O memory for register access * @reset: reset controller + * @lock: Mutex protecting this struct and register access (but not AUX) * @irq: irq * @bridge: DRM bridge for the DP encoder * @next_bridge: The downstream bridge + * @test: Configuration for test mode * @config: IP core configuration from DTS * @aux: aux channel + * @aux_done: Completed when we get an AUX reply or timeout + * @ignore_aux_errors: If set, AUX errors are suppressed * @phy: PHY handles for DP lanes * @num_lanes: number of enabled phy lanes * @hpd_work: hot plug detection worker + * @hpd_irq_work: hot plug detection IRQ worker + * @ignore_hpd: If set, HPD events and IRQs are ignored * @status: connection status * @enabled: flag to indicate if the device is enabled * @dpcd: DP configuration data from currently connected sink device * @link_config: common link configuration between IP core and sink device * @mode: current mode between IP core and sink device * @train_set: set of training data + * @debugfs_train_set: Debugfs private data for @train_set + * + * @lock covers the link configuration in this struct and the device's + * registers. It does not cover @aux or @ignore_aux_errors. It is not strictly + * required for any of the members which are only modified at probe/remove time + * (e.g. @dev). */ struct zynqmp_dp { struct drm_dp_aux aux; struct drm_bridge bridge; struct work_struct hpd_work; + struct work_struct hpd_irq_work; + struct completion aux_done; + struct mutex lock; struct drm_bridge *next_bridge; struct device *dev; @@ -310,9 +395,13 @@ struct zynqmp_dp { enum drm_connector_status status; int irq; bool enabled; + bool ignore_aux_errors; + bool ignore_hpd; + struct zynqmp_dp_train_set_priv debugfs_train_set[ZYNQMP_DP_MAX_LANES]; struct zynqmp_dp_mode mode; struct zynqmp_dp_link_config link_config; + struct zynqmp_dp_test test; struct zynqmp_dp_config config; u8 dpcd[DP_RECEIVER_CAP_SIZE]; u8 train_set[ZYNQMP_DP_MAX_LANES]; @@ -626,6 +715,7 @@ static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp, /** * zynqmp_dp_update_vs_emph - Update the training values * @dp: DisplayPort IP core structure + * @train_set: A set of training values * * Update the training values based on the request from sink. The mapped values * are predefined, and values(vs, pe, pc) are from the device manual. 
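[Editor's note: sketch, not part of the patch] Every debugfs attribute added below follows one shape, consistent with the @lock rules documented above: the getter and setter take dp->lock, read or update the cached test configuration, and re-program the link immediately when a test is already active. A minimal sketch of that shape, using the real DEFINE_DEBUGFS_ATTRIBUTE helper but a hypothetical example field in struct zynqmp_dp_test:

static int zynqmp_dp_example_get(void *data, u64 *val)
{
	struct zynqmp_dp *dp = data;

	mutex_lock(&dp->lock);
	*val = dp->test.example;	/* hypothetical field */
	mutex_unlock(&dp->lock);
	return 0;
}

static int zynqmp_dp_example_set(void *data, u64 val)
{
	struct zynqmp_dp *dp = data;
	int ret = 0;

	mutex_lock(&dp->lock);
	dp->test.example = val;		/* hypothetical field */
	if (dp->test.active)
		ret = zynqmp_dp_test_setup(dp);	/* re-apply live settings */
	mutex_unlock(&dp->lock);
	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_example, zynqmp_dp_example_get,
			 zynqmp_dp_example_set, "%llu\n");

The per-lane swing and pre-emphasis files differ only in that they receive a struct zynqmp_dp_train_set_priv, so each callback knows which lane it owns.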
@@ -633,12 +723,12 @@ static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp, * Return: 0 if vs and emph are updated successfully, or the error code returned * by drm_dp_dpcd_write(). */ -static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp) +static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp, u8 *train_set) { unsigned int i; int ret; - ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->train_set, + ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->mode.lane_cnt); if (ret < 0) return ret; @@ -646,7 +736,7 @@ static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp) for (i = 0; i < dp->mode.lane_cnt; i++) { u32 reg = ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 + i * 4; union phy_configure_opts opts = { 0 }; - u8 train = dp->train_set[i]; + u8 train = train_set[i]; opts.dp.voltage[0] = (train & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT; @@ -690,7 +780,7 @@ static int zynqmp_dp_link_train_cr(struct zynqmp_dp *dp) * So, This loop should exit before 512 iterations */ for (max_tries = 0; max_tries < 512; max_tries++) { - ret = zynqmp_dp_update_vs_emph(dp); + ret = zynqmp_dp_update_vs_emph(dp, dp->train_set); if (ret) return ret; @@ -755,7 +845,7 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp) return ret; for (tries = 0; tries < DP_MAX_TRAINING_TRIES; tries++) { - ret = zynqmp_dp_update_vs_emph(dp); + ret = zynqmp_dp_update_vs_emph(dp, dp->train_set); if (ret) return ret; @@ -778,28 +868,29 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp) } /** - * zynqmp_dp_train - Train the link + * zynqmp_dp_setup() - Set up major link parameters * @dp: DisplayPort IP core structure + * @bw_code: The link bandwidth as a multiple of 270 MHz + * @lane_cnt: The number of lanes to use + * @enhanced: Use enhanced framing + * @downspread: Enable spread-spectrum clocking * - * Return: 0 if all trains are done successfully, or corresponding error code. + * Return: 0 on success, or -errno on failure */ -static int zynqmp_dp_train(struct zynqmp_dp *dp) +static int zynqmp_dp_setup(struct zynqmp_dp *dp, u8 bw_code, u8 lane_cnt, + bool enhanced, bool downspread) { u32 reg; - u8 bw_code = dp->mode.bw_code; - u8 lane_cnt = dp->mode.lane_cnt; u8 aux_lane_cnt = lane_cnt; - bool enhanced; int ret; zynqmp_dp_write(dp, ZYNQMP_DP_LANE_COUNT_SET, lane_cnt); - enhanced = drm_dp_enhanced_frame_cap(dp->dpcd); if (enhanced) { zynqmp_dp_write(dp, ZYNQMP_DP_ENHANCED_FRAME_EN, 1); aux_lane_cnt |= DP_LANE_COUNT_ENHANCED_FRAME_EN; } - if (dp->dpcd[3] & 0x1) { + if (downspread) { zynqmp_dp_write(dp, ZYNQMP_DP_DOWNSPREAD_CTL, 1); drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5); @@ -842,8 +933,24 @@ static int zynqmp_dp_train(struct zynqmp_dp *dp) } zynqmp_dp_write(dp, ZYNQMP_DP_PHY_CLOCK_SELECT, reg); - ret = zynqmp_dp_phy_ready(dp); - if (ret < 0) + return zynqmp_dp_phy_ready(dp); +} + +/** + * zynqmp_dp_train - Train the link + * @dp: DisplayPort IP core structure + * + * Return: 0 if all trains are done successfully, or corresponding error code. + */ +static int zynqmp_dp_train(struct zynqmp_dp *dp) +{ + int ret; + + ret = zynqmp_dp_setup(dp, dp->mode.bw_code, dp->mode.lane_cnt, + drm_dp_enhanced_frame_cap(dp->dpcd), + dp->dpcd[DP_MAX_DOWNSPREAD] & + DP_MAX_DOWNSPREAD_0_5); + if (ret) return ret; zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 1); @@ -934,12 +1041,15 @@ static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr, u8 *buf, u8 bytes, u8 *reply) { bool is_read = (cmd & AUX_READ_BIT) ? 
true : false; + unsigned long time_left; u32 reg, i; reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE); if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REQUEST) return -EBUSY; + reinit_completion(&dp->aux_done); + zynqmp_dp_write(dp, ZYNQMP_DP_AUX_ADDRESS, addr); if (!is_read) for (i = 0; i < bytes; i++) @@ -954,17 +1064,14 @@ static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr, zynqmp_dp_write(dp, ZYNQMP_DP_AUX_COMMAND, reg); /* Wait for reply to be delivered up to 2ms */ - for (i = 0; ; i++) { - reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE); - if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY) - break; - - if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT || - i == 2) - return -ETIMEDOUT; + time_left = wait_for_completion_timeout(&dp->aux_done, + msecs_to_jiffies(2)); + if (!time_left) + return -ETIMEDOUT; - usleep_range(1000, 1100); - } + reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE); + if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT) + return -ETIMEDOUT; reg = zynqmp_dp_read(dp, ZYNQMP_DP_AUX_REPLY_CODE); if (reply) @@ -1006,6 +1113,8 @@ zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) if (dp->status == connector_status_disconnected) { dev_dbg(dp->dev, "no connected aux device\n"); + if (dp->ignore_aux_errors) + goto fake_response; return -ENODEV; } @@ -1014,7 +1123,13 @@ zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) dev_dbg(dp->dev, "failed to do aux transfer (%d)\n", ret); - return ret; + if (!dp->ignore_aux_errors) + return ret; + +fake_response: + msg->reply = DP_AUX_NATIVE_REPLY_ACK; + memset(msg->buffer, 0, msg->size); + return msg->size; } /** @@ -1048,6 +1163,9 @@ static int zynqmp_dp_aux_init(struct zynqmp_dp *dp) (w << ZYNQMP_DP_AUX_CLK_DIVIDER_AUX_FILTER_SHIFT) | (rate / (1000 * 1000))); + zynqmp_dp_write(dp, ZYNQMP_DP_INT_EN, ZYNQMP_DP_INT_REPLY_RECEIVED | + ZYNQMP_DP_INT_REPLY_TIMEOUT); + dp->aux.name = "ZynqMP DP AUX"; dp->aux.dev = dp->dev; dp->aux.drm_dev = dp->bridge.dev; @@ -1065,6 +1183,9 @@ static int zynqmp_dp_aux_init(struct zynqmp_dp *dp) static void zynqmp_dp_aux_cleanup(struct zynqmp_dp *dp) { drm_dp_aux_unregister(&dp->aux); + + zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_REPLY_RECEIVED | + ZYNQMP_DP_INT_REPLY_TIMEOUT); } /* ----------------------------------------------------------------------------- @@ -1386,8 +1507,10 @@ zynqmp_dp_bridge_mode_valid(struct drm_bridge *bridge, } /* Check with link rate and lane count */ + mutex_lock(&dp->lock); rate = zynqmp_dp_max_rate(dp->link_config.max_rate, dp->link_config.max_lanes, dp->config.bpp); + mutex_unlock(&dp->lock); if (mode->clock > rate) { dev_dbg(dp->dev, "filtered mode %s for high pixel rate\n", mode->name); @@ -1414,6 +1537,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge, pm_runtime_get_sync(dp->dev); + mutex_lock(&dp->lock); zynqmp_dp_disp_enable(dp, old_bridge_state); /* @@ -1474,6 +1598,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge, zynqmp_dp_write(dp, ZYNQMP_DP_SOFTWARE_RESET, ZYNQMP_DP_SOFTWARE_RESET_ALL); zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE, 1); + mutex_unlock(&dp->lock); } static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge, @@ -1481,6 +1606,7 @@ static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge, { struct zynqmp_dp *dp = bridge_to_dp(bridge); + mutex_lock(&dp->lock); dp->enabled = false; cancel_work(&dp->hpd_work); zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE,
0); @@ -1491,6 +1617,7 @@ static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge, zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 0); zynqmp_dp_disp_disable(dp, old_bridge_state); + mutex_unlock(&dp->lock); pm_runtime_put_sync(dp->dev); } @@ -1526,13 +1653,14 @@ static int zynqmp_dp_bridge_atomic_check(struct drm_bridge *bridge, return 0; } -static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status __zynqmp_dp_bridge_detect(struct zynqmp_dp *dp) { - struct zynqmp_dp *dp = bridge_to_dp(bridge); struct zynqmp_dp_link_config *link_config = &dp->link_config; u32 state, i; int ret; + lockdep_assert_held(&dp->lock); + /* * This is from heuristic. It takes some delay (ex, 100 ~ 500 msec) to * get the HPD signal with some monitors. @@ -1568,6 +1696,18 @@ disconnected: return connector_status_disconnected; } +static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge) +{ + struct zynqmp_dp *dp = bridge_to_dp(bridge); + enum drm_connector_status ret; + + mutex_lock(&dp->lock); + ret = __zynqmp_dp_bridge_detect(dp); + mutex_unlock(&dp->lock); + + return ret; +} + static const struct drm_edid *zynqmp_dp_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector) { @@ -1605,6 +1745,582 @@ zynqmp_dp_bridge_get_input_bus_fmts(struct drm_bridge *bridge, return zynqmp_dp_bridge_default_bus_fmts(num_input_fmts); } +/* ----------------------------------------------------------------------------- + * debugfs + */ + +/** + * zynqmp_dp_set_test_pattern() - Configure the link for a test pattern + * @dp: DisplayPort IP core structure + * @pattern: The test pattern to configure + * @custom: The custom pattern to use if @pattern is %TEST_80BIT_CUSTOM + * + * Return: 0 on success, or negative errno on (DPCD) failure + */ +static int zynqmp_dp_set_test_pattern(struct zynqmp_dp *dp, + enum test_pattern pattern, + u8 *const custom) +{ + bool scramble = false; + u32 train_pattern = 0; + u32 link_pattern = 0; + u8 dpcd_train = 0; + u8 dpcd_link = 0; + int ret; + + switch (pattern) { + case TEST_TPS1: + train_pattern = 1; + break; + case TEST_TPS2: + train_pattern = 2; + break; + case TEST_TPS3: + train_pattern = 3; + break; + case TEST_SYMBOL_ERROR: + scramble = true; + link_pattern = DP_PHY_TEST_PATTERN_ERROR_COUNT; + break; + case TEST_PRBS7: + /* We use a dedicated register to enable PRBS7 */ + dpcd_link = DP_LINK_QUAL_PATTERN_ERROR_RATE; + break; + case TEST_80BIT_CUSTOM: { + const u8 *p = custom; + + link_pattern = DP_LINK_QUAL_PATTERN_80BIT_CUSTOM; + + zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_1, + (p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0]); + zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_2, + (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]); + zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_3, + (p[9] << 8) | p[8]); + break; + } + case TEST_CP2520: + link_pattern = DP_LINK_QUAL_PATTERN_CP2520_PAT_1; + break; + default: + WARN_ON_ONCE(1); + fallthrough; + case TEST_VIDEO: + scramble = true; + } + + zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, !scramble); + zynqmp_dp_write(dp, ZYNQMP_DP_TRAINING_PATTERN_SET, train_pattern); + zynqmp_dp_write(dp, ZYNQMP_DP_LINK_QUAL_PATTERN_SET, link_pattern); + zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMIT_PRBS7, pattern == TEST_PRBS7); + + dpcd_link = dpcd_link ?: link_pattern; + dpcd_train = train_pattern; + if (!scramble) + dpcd_train |= DP_LINK_SCRAMBLING_DISABLE; + + if (dp->dpcd[DP_DPCD_REV] < 0x12) { + if (pattern == TEST_CP2520) + 
dev_warn(dp->dev, + "can't set sink link quality pattern to CP2520 for DPCD < r1.2; error counters will be invalid\n"); + else + dpcd_train |= FIELD_PREP(DP_LINK_QUAL_PATTERN_11_MASK, + dpcd_link); + } else { + u8 dpcd_link_lane[ZYNQMP_DP_MAX_LANES]; + + memset(dpcd_link_lane, dpcd_link, ZYNQMP_DP_MAX_LANES); + ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_QUAL_LANE0_SET, + dpcd_link_lane, ZYNQMP_DP_MAX_LANES); + if (ret < 0) + return ret; + } + + ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET, dpcd_train); + return ret < 0 ? ret : 0; +} + +static int zynqmp_dp_test_setup(struct zynqmp_dp *dp) +{ + return zynqmp_dp_setup(dp, dp->test.bw_code, dp->test.link_cnt, + dp->test.enhanced, dp->test.downspread); +} + +static ssize_t zynqmp_dp_pattern_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + char buf[16]; + ssize_t ret; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + mutex_lock(&dp->lock); + ret = snprintf(buf, sizeof(buf), "%s\n", + test_pattern_str[dp->test.pattern]); + mutex_unlock(&dp->lock); + + debugfs_file_put(dentry); + return simple_read_from_buffer(user_buf, count, ppos, buf, ret); +} + +static ssize_t zynqmp_dp_pattern_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + char buf[16]; + ssize_t ret; + int pattern; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, + count); + if (ret < 0) + goto out; + buf[ret] = '\0'; + + pattern = sysfs_match_string(test_pattern_str, buf); + if (pattern < 0) { + ret = -EINVAL; + goto out; + } + + mutex_lock(&dp->lock); + dp->test.pattern = pattern; + if (dp->test.active) + ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern, + dp->test.custom) ?: ret; + mutex_unlock(&dp->lock); + +out: + debugfs_file_put(dentry); + return ret; +} + +static const struct file_operations fops_zynqmp_dp_pattern = { + .read = zynqmp_dp_pattern_read, + .write = zynqmp_dp_pattern_write, + .open = simple_open, + .llseek = noop_llseek, +}; + +static int zynqmp_dp_enhanced_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->test.enhanced; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_enhanced_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + mutex_lock(&dp->lock); + dp->test.enhanced = val; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_enhanced, zynqmp_dp_enhanced_get, + zynqmp_dp_enhanced_set, "%llu\n"); + +static int zynqmp_dp_downspread_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->test.downspread; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_downspread_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + mutex_lock(&dp->lock); + dp->test.downspread = val; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_downspread, zynqmp_dp_downspread_get, + zynqmp_dp_downspread_set, "%llu\n"); + +static int zynqmp_dp_active_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = 
dp->test.active; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_active_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + mutex_lock(&dp->lock); + if (val) { + if (val < 2) { + ret = zynqmp_dp_test_setup(dp); + if (ret) + goto out; + } + + ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern, + dp->test.custom); + if (ret) + goto out; + + ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set); + if (ret) + goto out; + + dp->test.active = true; + } else { + int err; + + dp->test.active = false; + err = zynqmp_dp_set_test_pattern(dp, TEST_VIDEO, NULL); + if (err) + dev_warn(dp->dev, "could not clear test pattern: %d\n", + err); + zynqmp_dp_train_loop(dp); + } +out: + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_active, zynqmp_dp_active_get, + zynqmp_dp_active_set, "%llu\n"); + +static ssize_t zynqmp_dp_custom_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + ssize_t ret; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + mutex_lock(&dp->lock); + ret = simple_read_from_buffer(user_buf, count, ppos, &dp->test.custom, + sizeof(dp->test.custom)); + mutex_unlock(&dp->lock); + + debugfs_file_put(dentry); + return ret; +} + +static ssize_t zynqmp_dp_custom_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + ssize_t ret; + char buf[sizeof(dp->test.custom)]; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); + if (ret < 0) + goto out; + + mutex_lock(&dp->lock); + memcpy(dp->test.custom, buf, ret); + if (dp->test.active) + ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern, + dp->test.custom) ?: ret; + mutex_unlock(&dp->lock); + +out: + debugfs_file_put(dentry); + return ret; +} + +static const struct file_operations fops_zynqmp_dp_custom = { + .read = zynqmp_dp_custom_read, + .write = zynqmp_dp_custom_write, + .open = simple_open, + .llseek = noop_llseek, +}; + +static int zynqmp_dp_swing_get(void *data, u64 *val) +{ + struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + + mutex_lock(&dp->lock); + *val = dp->test.train_set[priv->lane] & DP_TRAIN_VOLTAGE_SWING_MASK; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_swing_set(void *data, u64 val) +{ + struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + u8 *train_set = &dp->test.train_set[priv->lane]; + int ret = 0; + + if (val > 3) + return -EINVAL; + + mutex_lock(&dp->lock); + *train_set &= ~(DP_TRAIN_MAX_SWING_REACHED | + DP_TRAIN_VOLTAGE_SWING_MASK); + *train_set |= val; + if (val == 3) + *train_set |= DP_TRAIN_MAX_SWING_REACHED; + + if (dp->test.active) + ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_swing, zynqmp_dp_swing_get, + zynqmp_dp_swing_set, "%llu\n"); + +static int zynqmp_dp_preemphasis_get(void *data, u64 *val) +{ + struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + + mutex_lock(&dp->lock); + *val = FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK, + dp->test.train_set[priv->lane]); + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_preemphasis_set(void *data, u64 val) +{ + 
struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + u8 *train_set = &dp->test.train_set[priv->lane]; + int ret = 0; + + if (val > 2) + return -EINVAL; + + mutex_lock(&dp->lock); + *train_set &= ~(DP_TRAIN_MAX_PRE_EMPHASIS_REACHED | + DP_TRAIN_PRE_EMPHASIS_MASK); + *train_set |= val; + if (val == 2) + *train_set |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + if (dp->test.active) + ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_preemphasis, zynqmp_dp_preemphasis_get, + zynqmp_dp_preemphasis_set, "%llu\n"); + +static int zynqmp_dp_lanes_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->test.link_cnt; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_lanes_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + if (val > ZYNQMP_DP_MAX_LANES) + return -EINVAL; + + mutex_lock(&dp->lock); + if (val > dp->num_lanes) { + ret = -EINVAL; + } else { + dp->test.link_cnt = val; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + } + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_lanes, zynqmp_dp_lanes_get, + zynqmp_dp_lanes_set, "%llu\n"); + +static int zynqmp_dp_rate_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = drm_dp_bw_code_to_link_rate(dp->test.bw_code) * 10000; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_rate_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int link_rate; + int ret = 0; + u8 bw_code; + + if (do_div(val, 10000)) + return -EINVAL; + + bw_code = drm_dp_link_rate_to_bw_code(val); + link_rate = drm_dp_bw_code_to_link_rate(bw_code); + if (val != link_rate) + return -EINVAL; + + if (bw_code != DP_LINK_BW_1_62 && bw_code != DP_LINK_BW_2_7 && + bw_code != DP_LINK_BW_5_4) + return -EINVAL; + + mutex_lock(&dp->lock); + dp->test.bw_code = bw_code; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_rate, zynqmp_dp_rate_get, + zynqmp_dp_rate_set, "%llu\n"); + +static int zynqmp_dp_ignore_aux_errors_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->aux.hw_mutex); + *val = dp->ignore_aux_errors; + mutex_unlock(&dp->aux.hw_mutex); + return 0; +} + +static int zynqmp_dp_ignore_aux_errors_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + + if (val != !!val) + return -EINVAL; + + mutex_lock(&dp->aux.hw_mutex); + dp->ignore_aux_errors = val; + mutex_unlock(&dp->aux.hw_mutex); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_ignore_aux_errors, + zynqmp_dp_ignore_aux_errors_get, + zynqmp_dp_ignore_aux_errors_set, "%llu\n"); + +static int zynqmp_dp_ignore_hpd_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->ignore_hpd; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_ignore_hpd_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + + if (val != !!val) + return -EINVAL; + + mutex_lock(&dp->lock); + dp->ignore_hpd = val; + mutex_unlock(&dp->lock); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_ignore_hpd, zynqmp_dp_ignore_hpd_get, + zynqmp_dp_ignore_hpd_set, "%llu\n"); + +static void zynqmp_dp_bridge_debugfs_init(struct drm_bridge *bridge, + struct dentry *root) +{ + struct zynqmp_dp *dp = bridge_to_dp(bridge); + struct dentry *test; + int i; + +
dp->test.bw_code = DP_LINK_BW_5_4; + dp->test.link_cnt = dp->num_lanes; + + test = debugfs_create_dir("test", root); +#define CREATE_FILE(name) \ + debugfs_create_file(#name, 0600, test, dp, &fops_zynqmp_dp_##name) + CREATE_FILE(pattern); + CREATE_FILE(enhanced); + CREATE_FILE(downspread); + CREATE_FILE(active); + CREATE_FILE(custom); + CREATE_FILE(rate); + CREATE_FILE(lanes); + CREATE_FILE(ignore_aux_errors); + CREATE_FILE(ignore_hpd); + + for (i = 0; i < dp->num_lanes; i++) { + static const char fmt[] = "lane%d_preemphasis"; + char name[sizeof(fmt)]; + + dp->debugfs_train_set[i].dp = dp; + dp->debugfs_train_set[i].lane = i; + + snprintf(name, sizeof(name), fmt, i); + debugfs_create_file(name, 0600, test, + &dp->debugfs_train_set[i], + &fops_zynqmp_dp_preemphasis); + + snprintf(name, sizeof(name), "lane%d_swing", i); + debugfs_create_file(name, 0600, test, + &dp->debugfs_train_set[i], + &fops_zynqmp_dp_swing); + } +} + static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = { .attach = zynqmp_dp_bridge_attach, .detach = zynqmp_dp_bridge_detach, @@ -1618,6 +2334,7 @@ static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = { .detect = zynqmp_dp_bridge_detect, .edid_read = zynqmp_dp_bridge_edid_read, .atomic_get_input_bus_fmts = zynqmp_dp_bridge_get_input_bus_fmts, + .debugfs_init = zynqmp_dp_bridge_debugfs_init, }; /* ----------------------------------------------------------------------------- @@ -1651,10 +2368,46 @@ static void zynqmp_dp_hpd_work_func(struct work_struct *work) struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, hpd_work); enum drm_connector_status status; - status = zynqmp_dp_bridge_detect(&dp->bridge); + mutex_lock(&dp->lock); + if (dp->ignore_hpd) { + mutex_unlock(&dp->lock); + return; + } + + status = __zynqmp_dp_bridge_detect(dp); + mutex_unlock(&dp->lock); + drm_bridge_hpd_notify(&dp->bridge, status); } +static void zynqmp_dp_hpd_irq_work_func(struct work_struct *work) +{ + struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, + hpd_irq_work); + u8 status[DP_LINK_STATUS_SIZE + 2]; + int err; + + mutex_lock(&dp->lock); + if (dp->ignore_hpd) { + mutex_unlock(&dp->lock); + return; + } + + err = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status, + DP_LINK_STATUS_SIZE + 2); + if (err < 0) { + dev_dbg_ratelimited(dp->dev, + "could not read sink status: %d\n", err); + } else { + if (status[4] & DP_LINK_STATUS_UPDATED || + !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) || + !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) { + zynqmp_dp_train_loop(dp); + } + } + mutex_unlock(&dp->lock); +} + static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data) { struct zynqmp_dp *dp = (struct zynqmp_dp *)data; @@ -1686,23 +2439,15 @@ static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data) if (status & ZYNQMP_DP_INT_HPD_EVENT) schedule_work(&dp->hpd_work); - if (status & ZYNQMP_DP_INT_HPD_IRQ) { - int ret; - u8 status[DP_LINK_STATUS_SIZE + 2]; + if (status & ZYNQMP_DP_INT_HPD_IRQ) + schedule_work(&dp->hpd_irq_work); - ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status, - DP_LINK_STATUS_SIZE + 2); - if (ret < 0) - goto handled; + if (status & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY) + complete(&dp->aux_done); - if (status[4] & DP_LINK_STATUS_UPDATED || - !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) || - !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) { - zynqmp_dp_train_loop(dp); - } - } + if (status & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT) + complete(&dp->aux_done); -handled: return IRQ_HANDLED; } @@ -1725,8 
+2470,11 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub) dp->dev = &pdev->dev; dp->dpsub = dpsub; dp->status = connector_status_disconnected; + mutex_init(&dp->lock); + init_completion(&dp->aux_done); INIT_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func); + INIT_WORK(&dp->hpd_irq_work, zynqmp_dp_hpd_irq_work_func); /* Acquire all resources (IOMEM, IRQ and PHYs). */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp"); @@ -1802,9 +2550,8 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub) * Now that the hardware is initialized and won't generate spurious * interrupts, request the IRQ. */ - ret = devm_request_threaded_irq(dp->dev, dp->irq, NULL, - zynqmp_dp_irq_handler, IRQF_ONESHOT, - dev_name(dp->dev), dp); + ret = devm_request_irq(dp->dev, dp->irq, zynqmp_dp_irq_handler, + IRQF_SHARED, dev_name(dp->dev), dp); if (ret < 0) goto err_phy_exit; @@ -1829,8 +2576,9 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub) struct zynqmp_dp *dp = dpsub->dp; zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_ALL); - disable_irq(dp->irq); + devm_free_irq(dp->dev, dp->irq, dp); + cancel_work_sync(&dp->hpd_irq_work); cancel_work_sync(&dp->hpd_work); zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 0); @@ -1838,4 +2586,5 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub) zynqmp_dp_phy_exit(dp); zynqmp_dp_reset(dp, true); + mutex_destroy(&dp->lock); } diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c index f5781939de9c..07c4d184e7a1 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c @@ -320,7 +320,7 @@ MODULE_DEVICE_TABLE(of, zynqmp_dpsub_of_match); static struct platform_driver zynqmp_dpsub_driver = { .probe = zynqmp_dpsub_probe, - .remove_new = zynqmp_dpsub_remove, + .remove = zynqmp_dpsub_remove, .shutdown = zynqmp_dpsub_shutdown, .driver = { .name = "zynqmp-dpsub", diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c index 2452c2f09161..fc81983d9e5e 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_kms.c +++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c @@ -511,12 +511,12 @@ int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) if (ret) return ret; - drm_kms_helper_poll_init(drm); - ret = zynqmp_dpsub_kms_init(dpsub); if (ret < 0) goto err_poll_fini; + drm_kms_helper_poll_init(drm); + /* Reset all components and register the DRM device. 
*/ drm_mode_config_reset(drm); @@ -538,7 +538,7 @@ void zynqmp_dpsub_drm_cleanup(struct zynqmp_dpsub *dpsub) { struct drm_device *drm = &dpsub->drm->dev; - drm_dev_unregister(drm); + drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); drm_encoder_cleanup(&dpsub->drm->encoder); drm_kms_helper_poll_fini(drm); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index be2ad7203d7b..7b1d091f3c09 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -769,7 +769,7 @@ static struct platform_driver tegra_host1x_driver = { .pm = &host1x_pm_ops, }, .probe = host1x_probe, - .remove_new = host1x_remove, + .remove = host1x_remove, }; static struct platform_driver * const drivers[] = { diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 3535be9daa1f..947323f4a234 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -1467,7 +1467,7 @@ static struct platform_driver imx_ipu_driver = { .of_match_table = imx_ipu_dt_ids, }, .probe = ipu_probe, - .remove_new = ipu_remove, + .remove = ipu_remove, }; static struct platform_driver * const drivers[] = { diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index 41bd5dbd7356..7aac70368b00 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c @@ -374,7 +374,7 @@ static const struct of_device_id ipu_pre_dt_ids[] = { struct platform_driver ipu_pre_drv = { .probe = ipu_pre_probe, - .remove_new = ipu_pre_remove, + .remove = ipu_pre_remove, .driver = { .name = "imx-ipu-pre", .of_match_table = ipu_pre_dt_ids, diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c index afb2d72e9175..d38d3ba54d72 100644 --- a/drivers/gpu/ipu-v3/ipu-prg.c +++ b/drivers/gpu/ipu-v3/ipu-prg.c @@ -469,7 +469,7 @@ static const struct of_device_id ipu_prg_dt_ids[] = { struct platform_driver ipu_prg_drv = { .probe = ipu_prg_probe, - .remove_new = ipu_prg_remove, + .remove = ipu_prg_remove, .driver = { .name = "imx-ipu-prg", .pm = &prg_pm_ops,

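[Editor's note: sketch, not part of the patch] The zynqmp_dp AUX rework above replaces a register-polling loop with a completion that the IRQ handler signals on both the reply-received and reply-timeout interrupts. A self-contained sketch of the pattern, with hypothetical names (example_dev, example_irq) and only standard kernel primitives:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>

struct example_dev {
	struct completion reply_done;	/* init_completion() once at probe */
};

static irqreturn_t example_irq(int irq, void *data)
{
	struct example_dev *edev = data;

	/* the hardware raised "reply received" or "reply timed out" */
	complete(&edev->reply_done);
	return IRQ_HANDLED;
}

static int example_submit_and_wait(struct example_dev *edev)
{
	unsigned long time_left;

	/* re-arm before kicking the hardware so no event is lost */
	reinit_completion(&edev->reply_done);

	/* ... write the AUX request to the hardware here ... */

	time_left = wait_for_completion_timeout(&edev->reply_done,
						msecs_to_jiffies(2));
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

As in the patch, a real driver still reads the interrupt status register after waking, since the same completion fires for both the reply and the reply-timeout case.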