Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aldebaran.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 62
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 97
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 243
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 62
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 203
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v3_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 185
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 247
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 14
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 501
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 118
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 829
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 81
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 232
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c | 216
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/conversion.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 87
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 94
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 108
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 394
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 187
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 429
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_state.c | 867
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 129
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 69
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_bios_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 275
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 59
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_helper.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_plane.h | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_plane_priv.h | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_state.h | 78
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_state_priv.h | 102
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 78
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream_priv.h | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 96
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/Makefile | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 140
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c | 62
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h | 51
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 115
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 110
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c | 89
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/Makefile | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 91
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 116
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 229
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c) | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 171
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 90
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.c | 60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.c | 60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c | 379
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 134
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/include/audio_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h | 28
-rw-r--r--  drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 5
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 5
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h | 8
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h | 102
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h | 184
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 97
-rw-r--r--  drivers/gpu/drm/amd/include/mes_v11_api_def.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 36
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c | 52
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 23
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 208
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 52
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h | 80
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h | 100
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 89
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 48
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 196
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 15
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 66
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_internal.h | 3
-rw-r--r--  drivers/gpu/drm/arm/malidp_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/bridge/aux-bridge.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/aux-hpd-bridge.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/panel.c | 17
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8640.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 20
-rw-r--r--  drivers/gpu/drm/ci/arm64.config | 1
-rw-r--r--  drivers/gpu/drm/ci/build.sh | 19
-rw-r--r--  drivers/gpu/drm/ci/gitlab-ci.yml | 2
-rwxr-xr-x  drivers/gpu/drm/ci/igt_runner.sh | 10
-rw-r--r--  drivers/gpu/drm/ci/test.yml | 13
-rw-r--r--  drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt | 13
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt | 5
-rw-r--r--  drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt | 46
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper.c | 1
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c | 1
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 1
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 94
-rw-r--r--  drivers/gpu/drm/drm_atomic_state_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 52
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 3
-rw-r--r--  drivers/gpu/drm/drm_exec.c | 13
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 5
-rw-r--r--  drivers/gpu/drm/drm_gpuvm.c | 6
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 6
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 1
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 33
-rw-r--r--  drivers/gpu/drm/drm_property.c | 59
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dma.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_mic.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_scaler.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/intel_gmbus.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 18
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 111
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.c | 107
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 48
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 35
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_core.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.c | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c | 147
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_regs.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 31
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 25
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 21
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_internal.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_phys.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_context.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 43
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_regs.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_user.c | 39
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gsc.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_mcr.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.h | 38
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_regs.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 100
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 41
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_cs.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 65
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_reset.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rps.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_slpc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h | 79
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 115
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 108
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_drm_client.c | 108
-rw-r--r--  drivers/gpu/drm/i915/i915_drm_client.h | 42
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 194
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 44
-rw-r--r--  drivers/gpu/drm/i915/i915_hwmon.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_memcpy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 39
-rw-r--r--  drivers/gpu/drm/i915/i915_perf_types.h | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 77
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 79
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 221
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.h | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.h | 73
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp.c | 18
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_irq.c | 5
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_session.c | 6
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_live_test.c | 9
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_live_test.h | 3
-rw-r--r--  drivers/gpu/drm/imagination/pvr_device.h | 46
-rw-r--r--  drivers/gpu/drm/imagination/pvr_fw_trace.c | 2
-rw-r--r--  drivers/gpu/drm/imagination/pvr_hwrt.c | 27
-rw-r--r--  drivers/gpu/drm/imagination/pvr_job.c | 2
-rw-r--r--  drivers/gpu/drm/imagination/pvr_mmu.c | 4
-rw-r--r--  drivers/gpu/drm/imagination/pvr_vm.c | 32
-rw-r--r--  drivers/gpu/drm/imagination/pvr_vm_mips.c | 6
-rw-r--r--  drivers/gpu/drm/lima/lima_ctx.c | 1
-rw-r--r--  drivers/gpu/drm/loongson/lsdc_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/mediatek/Makefile | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_cec.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_aal.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ccorr.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_drv.h | 8
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_gamma.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_merge.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c | 248
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dp.c | 1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dpi.c | 16
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 24
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 20
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 10
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.h | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c | 1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mdp_rdma.c | 19
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_padding.c | 160
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_g200er.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_g200ev.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_g200se.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 10
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 21
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 122
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 3
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 9
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h | 457
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h | 17
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h | 17
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h | 104
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h | 17
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h | 18
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h | 8
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h | 32
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h | 17
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h | 7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h | 11
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h | 7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h | 51
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h | 16
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h | 25
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h | 51
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h | 33
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 29
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 186
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 21
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 75
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 55
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 130
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 223
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 72
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c | 247
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h | 142
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 52
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 28
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c | 12
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c | 7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c | 16
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h | 12
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 14
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 11
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 22
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 17
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c | 20
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h | 15
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c | 14
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h | 13
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c | 15
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h | 14
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 37
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 37
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c | 17
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h | 8
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c | 70
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 17
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c | 14
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h | 8
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c | 18
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h | 13
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 79
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 105
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 141
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 13
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 42
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c | 32
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c | 37
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c | 87
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c | 24
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 30
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c | 21
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c | 29
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 25
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c | 19
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h | 1
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_aux.c | 39
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_debug.c | 69
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_debug.h | 23
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 369
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.h | 4
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_drm.c | 30
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_power.c | 32
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_power.h | 11
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.c | 17
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.h | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 10
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 27
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c | 41
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 94
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 15
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 17
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_shrinker.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 233
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 44
-rw-r--r--  drivers/gpu/drm/msm/msm_mdss.c | 106
-rw-r--r--  drivers/gpu/drm/msm/msm_mdss.h | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h | 51
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_uvmm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vmm.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c | 171
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 174
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c | 2
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 18
-rw-r--r--  drivers/gpu/drm/panel/Makefile | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-edp.c | 3
-rw-r--r--  drivers/gpu/drm/panel/panel-ilitek-ili9805.c | 405
-rw-r--r--  drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-newvision-nv3052c.c | 515
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt36523.c | 4
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 34
-rw-r--r--  drivers/gpu/drm/panel/panel-sitronix-st7701.c | 138
-rw-r--r--  drivers/gpu/drm/panel/panel-synaptics-r63353.c | 362
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_devfreq.c | 17
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/dce3_1_afmt.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_audio.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_audio.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/inno_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rk3066_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 18
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 12
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 498
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop2.h | 100
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_lvds.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_rgb.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 223
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/tests/drm_exec_test.c | 16
-rw-r--r--  drivers/gpu/drm/tests/drm_kunit_helpers.c | 78
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_device_test.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_pool_test.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c | 22
-rw-r--r--  drivers/gpu/drm/v3d/v3d_debugfs.c | 20
-rw-r--r--  drivers/gpu/drm/vc4/tests/vc4_mock.c | 9
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 12
-rw-r--r--  drivers/gpu/drm/vkms/vkms_writeback.c | 25
-rw-r--r--  drivers/gpu/drm/xe/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_exec.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_pagefault.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 8
713 files changed, 17086 insertions, 7039 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index 02f4c6f9d4f6..576067d66bb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -330,6 +330,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
{
struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
+ struct amdgpu_ras *con;
int r;
if (reset_device_list == NULL)
@@ -355,7 +356,30 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
*/
amdgpu_register_gpu_instance(tmp_adev);
- /* Resume RAS */
+ /* Resume RAS, ecc_irq */
+ con = amdgpu_ras_get_context(tmp_adev);
+ if (!amdgpu_sriov_vf(tmp_adev) && con) {
+ if (tmp_adev->sdma.ras &&
+ tmp_adev->sdma.ras->ras_block.ras_late_init) {
+ r = tmp_adev->sdma.ras->ras_block.ras_late_init(tmp_adev,
+ &tmp_adev->sdma.ras->ras_block.ras_comm);
+ if (r) {
+ dev_err(tmp_adev->dev, "SDMA failed to execute ras_late_init! ret:%d\n", r);
+ goto end;
+ }
+ }
+
+ if (tmp_adev->gfx.ras &&
+ tmp_adev->gfx.ras->ras_block.ras_late_init) {
+ r = tmp_adev->gfx.ras->ras_block.ras_late_init(tmp_adev,
+ &tmp_adev->gfx.ras->ras_block.ras_comm);
+ if (r) {
+ dev_err(tmp_adev->dev, "GFX failed to execute ras_late_init! ret:%d\n", r);
+ goto end;
+ }
+ }
+ }
+
amdgpu_ras_resume(tmp_adev);
/* Update PSP FW topology after reset */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 0af8ac81facd..3d8a48f46b01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -252,6 +252,8 @@ extern int amdgpu_seamless;
extern int amdgpu_user_partt_mode;
extern int amdgpu_agp;
+extern int amdgpu_wbrf;
+
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
@@ -789,6 +791,7 @@ struct amdgpu_mqd_prop {
uint64_t eop_gpu_addr;
uint32_t hqd_pipe_priority;
uint32_t hqd_queue_priority;
+ bool allow_tunneling;
bool hqd_active;
};
@@ -1141,6 +1144,7 @@ struct amdgpu_device {
bool debug_vm;
bool debug_largebar;
bool debug_disable_soft_recovery;
+ bool debug_use_vram_fw_buf;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 2d22f7d45512..77e263660288 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -138,10 +138,14 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
+static const struct drm_client_funcs kfd_client_funcs = {
+ .unregister = drm_client_release,
+};
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i;
int last_valid_bit;
+ int ret;
amdgpu_amdkfd_gpuvm_init_mem_limits();
@@ -160,6 +164,12 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
.enable_mes = adev->enable_mes,
};
+ ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd", &kfd_client_funcs);
+ if (ret) {
+ dev_err(adev->dev, "Failed to init DRM client: %d\n", ret);
+ return;
+ }
+
/* this is going to have a few of the MSBs set that we need to
* clear
*/
@@ -198,6 +208,10 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
&gpu_resources);
+ if (adev->kfd.init_complete)
+ drm_client_register(&adev->kfd.client);
+ else
+ drm_client_release(&adev->kfd.client);
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -684,10 +698,8 @@ err:
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
- /* Temporary workaround to fix issues observed in some
- * compute applications when GFXOFF is enabled on GFX11.
- */
- if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11) {
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+ ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
amdgpu_gfx_off_ctrl(adev, idle);
} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 16794c2eea35..f262b9d89541 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -33,6 +33,7 @@
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <kgd_kfd_interface.h>
+#include <drm/drm_client.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"
#include "amdgpu_xcp.h"
@@ -83,6 +84,7 @@ struct kgd_mem {
struct amdgpu_sync sync;
+ uint32_t gem_handle;
bool aql_queue;
bool is_imported;
};
@@ -105,6 +107,9 @@ struct amdgpu_kfd_dev {
/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
struct dev_pagemap pgmap;
+
+ /* Client for KFD BO GEM handle allocations */
+ struct drm_client_dev client;
};
enum kgd_engine_type {
@@ -306,14 +311,13 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
- struct dma_fence **ef);
+ struct dma_fence __rcu **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
struct kfd_vm_fault_info *info);
-int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
- struct dma_buf *dmabuf,
- uint64_t va, void *drm_priv,
- struct kgd_mem **mem, uint64_t *size,
- uint64_t *mmap_offset);
+int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
+ uint64_t va, void *drm_priv,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset);
int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
struct dma_buf **dmabuf);
void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
index 469785d33791..1ef758ac5076 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -90,7 +90,7 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
return NULL;
fence = container_of(f, struct amdgpu_amdkfd_fence, base);
- if (fence && f->ops == &amdkfd_fence_ops)
+ if (f->ops == &amdkfd_fence_ops)
return fence;
return NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 73288f9ccaf8..f183d7faeeec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,6 +25,7 @@
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
+#include <linux/fdtable.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
@@ -806,13 +807,22 @@ kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
{
if (!mem->dmabuf) {
- struct dma_buf *ret = amdgpu_gem_prime_export(
- &mem->bo->tbo.base,
+ struct amdgpu_device *bo_adev;
+ struct dma_buf *dmabuf;
+ int r, fd;
+
+ bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
+ r = drm_gem_prime_handle_to_fd(&bo_adev->ddev, bo_adev->kfd.client.file,
+ mem->gem_handle,
mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
- DRM_RDWR : 0);
- if (IS_ERR(ret))
- return PTR_ERR(ret);
- mem->dmabuf = ret;
+ DRM_RDWR : 0, &fd);
+ if (r)
+ return r;
+ dmabuf = dma_buf_get(fd);
+ close_fd(fd);
+ if (WARN_ON_ONCE(IS_ERR(dmabuf)))
+ return PTR_ERR(dmabuf);
+ mem->dmabuf = dmabuf;
}
return 0;
@@ -1137,7 +1147,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
ctx->n_vms = 1;
ctx->sync = &mem->sync;
- drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&ctx->exec) {
ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
drm_exec_retry_on_contention(&ctx->exec);
@@ -1176,7 +1186,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
int ret;
ctx->sync = &mem->sync;
- drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&ctx->exec) {
ctx->n_vms = 0;
list_for_each_entry(entry, &mem->attachments, list) {
@@ -1778,6 +1788,9 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
pr_debug("Failed to allow vma node access. ret %d\n", ret);
goto err_node_allow;
}
+ ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle);
+ if (ret)
+ goto err_gem_handle_create;
bo = gem_to_amdgpu_bo(gobj);
if (bo_type == ttm_bo_type_sg) {
bo->tbo.sg = sg;
@@ -1829,6 +1842,8 @@ allocate_init_user_pages_failed:
err_pin_bo:
err_validate_bo:
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
+ drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle);
+err_gem_handle_create:
drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
/* Don't unreserve system mem limit twice */
@@ -1941,8 +1956,11 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
/* Free the BO*/
drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
- if (mem->dmabuf)
+ drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle);
+ if (mem->dmabuf) {
dma_buf_put(mem->dmabuf);
+ mem->dmabuf = NULL;
+ }
mutex_destroy(&mem->lock);
/* If this releases the last reference, it will end up calling
@@ -2294,34 +2312,26 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
return 0;
}
-int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
- struct dma_buf *dma_buf,
- uint64_t va, void *drm_priv,
- struct kgd_mem **mem, uint64_t *size,
- uint64_t *mmap_offset)
+static int import_obj_create(struct amdgpu_device *adev,
+ struct dma_buf *dma_buf,
+ struct drm_gem_object *obj,
+ uint64_t va, void *drm_priv,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
- struct drm_gem_object *obj;
struct amdgpu_bo *bo;
int ret;
- obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
bo = gem_to_amdgpu_bo(obj);
if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT))) {
+ AMDGPU_GEM_DOMAIN_GTT)))
/* Only VRAM and GTT BOs are supported */
- ret = -EINVAL;
- goto err_put_obj;
- }
+ return -EINVAL;
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if (!*mem) {
- ret = -ENOMEM;
- goto err_put_obj;
- }
+ if (!*mem)
+ return -ENOMEM;
ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
if (ret)
@@ -2371,8 +2381,41 @@ err_remove_mem:
drm_vma_node_revoke(&obj->vma_node, drm_priv);
err_free_mem:
kfree(*mem);
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
+ uint64_t va, void *drm_priv,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset)
+{
+ struct drm_gem_object *obj;
+ uint32_t handle;
+ int ret;
+
+ ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd,
+ &handle);
+ if (ret)
+ return ret;
+ obj = drm_gem_object_lookup(adev->kfd.client.file, handle);
+ if (!obj) {
+ ret = -EINVAL;
+ goto err_release_handle;
+ }
+
+ ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size,
+ mmap_offset);
+ if (ret)
+ goto err_put_obj;
+
+ (*mem)->gem_handle = handle;
+
+ return 0;
+
err_put_obj:
drm_gem_object_put(obj);
+err_release_handle:
+ drm_gem_handle_delete(adev->kfd.client.file, handle);
return ret;
}
@@ -2552,7 +2595,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
amdgpu_sync_create(&sync);
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
/* Reserve all BOs and page tables for validation */
drm_exec_until_all_locked(&exec) {
/* Reserve all the page directories */
@@ -2759,7 +2802,7 @@ unlock_out:
put_task_struct(usertask);
}
-static void replace_eviction_fence(struct dma_fence **ef,
+static void replace_eviction_fence(struct dma_fence __rcu **ef,
struct dma_fence *new_ef)
{
struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true
/* protected by process_info->lock */);
@@ -2794,7 +2837,7 @@ static void replace_eviction_fence(struct dma_fence **ef,
* 7. Add fence to all PD and PT BOs.
* 8. Unreserve all BOs
*/
-int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
+int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
{
struct amdkfd_process_info *process_info = info;
struct amdgpu_vm *peer_vm;
@@ -2810,7 +2853,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
mutex_lock(&process_info->lock);
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 96f63fd39b9e..9caba10315a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1103,7 +1103,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
* DDC line. The latter is more complex because with DVI<->HDMI adapters
* you don't really know what's connected to which port as both are digital.
*/
- amdgpu_connector_shared_ddc(&ret, connector, amdgpu_connector);
+ amdgpu_connector_shared_ddc(&ret, connector, amdgpu_connector);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e50be6500030..6adeddfb3d56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -66,7 +66,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
amdgpu_sync_create(&p->sync);
drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
- DRM_EXEC_IGNORE_DUPLICATES);
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
return 0;
}
@@ -870,9 +870,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo = e->bo;
int i;
- e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
- sizeof(struct page *),
- GFP_KERNEL | __GFP_ZERO);
+ e->user_pages = kvcalloc(bo->tbo.ttm->num_pages,
+ sizeof(struct page *),
+ GFP_KERNEL);
if (!e->user_pages) {
DRM_ERROR("kvmalloc_array failure\n");
r = -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 720011019741..796fa6f1420b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -70,7 +70,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
int r;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
@@ -110,7 +110,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
int r;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index c480192e33f5..e485dd3357c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -540,7 +540,11 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
while (size) {
uint32_t value;
- value = RREG32_PCIE(*pos);
+ if (upper_32_bits(*pos))
+ value = RREG32_PCIE_EXT(*pos);
+ else
+ value = RREG32_PCIE(*pos);
+
r = put_user(value, (uint32_t *)buf);
if (r)
goto out;
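upper_32_bits() yields bits 63:32 of the offset, so the new branch routes any
register address at or above 4 GiB to the extended accessor. A worked example:

    loff_t pos = 0x100000004ULL;

    if (upper_32_bits(pos))        /* 0x1 here, take the extended path */
        value = RREG32_PCIE_EXT(pos);
    else                           /* offsets below 4 GiB */
        value = RREG32_PCIE(pos);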
@@ -600,7 +604,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (r)
goto out;
- WREG32_PCIE(*pos, value);
+ if (upper_32_bits(*pos))
+ WREG32_PCIE_EXT(*pos, value);
+ else
+ WREG32_PCIE(*pos, value);
result += 4;
buf += 4;
@@ -755,7 +762,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
int r;
if (!adev->smc_rreg)
- return -EPERM;
+ return -EOPNOTSUPP;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
@@ -814,7 +821,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
int r;
if (!adev->smc_wreg)
- return -EPERM;
+ return -EOPNOTSUPP;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
@@ -2147,6 +2154,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_debugfs_firmware_init(adev);
amdgpu_ta_if_debugfs_init(adev);
+ amdgpu_debugfs_mes_event_log_init(adev);
+
#if defined(CONFIG_DRM_AMD_DC)
if (adev->dc_enabled)
dtn_debugfs_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 371a6f0deb29..0425432d8659 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -32,3 +32,5 @@ void amdgpu_debugfs_fini(struct amdgpu_device *adev);
void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6c0cf64d465a..b158d27d0a71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1544,6 +1544,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
return true;
fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
+ release_firmware(adev->pm.fw);
if (fw_ver < 0x00160e00)
return true;
}
@@ -1599,7 +1600,7 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
if (adev->mman.keep_stolen_vga_memory)
return false;
- return adev->ip_versions[DCE_HWIP][0] >= IP_VERSION(3, 0, 0);
+ return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
}
/*
@@ -2251,15 +2252,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
- if (adev->mman.discovery_bin) {
- /*
- * FIXME: The bounding box is still needed by Navi12, so
- * temporarily read it from gpu_info firmware. Should be dropped
- * when DAL no longer needs it.
- */
- if (adev->asic_type != CHIP_NAVI12)
- return 0;
- }
+ if (adev->mman.discovery_bin)
+ return 0;
switch (adev->asic_type) {
default:
@@ -3861,10 +3855,6 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
adev->gfx.mcbp = true;
else if (amdgpu_mcbp == 0)
adev->gfx.mcbp = false;
- else if ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 0, 0)) &&
- (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0)) &&
- adev->gfx.num_gfx_rings)
- adev->gfx.mcbp = true;
if (amdgpu_sriov_vf(adev))
adev->gfx.mcbp = true;
@@ -4593,8 +4583,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_ras_suspend(adev);
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
-
amdgpu_device_ip_suspend_phase1(adev);
if (!adev->in_s0ix)
@@ -4604,6 +4592,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (r)
return r;
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
amdgpu_fence_driver_hw_fini(adev);
amdgpu_device_ip_suspend_phase2(adev);
@@ -5256,7 +5246,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
struct amdgpu_device *tmp_adev = NULL;
bool need_full_reset, skip_hw_reset, vram_lost = false;
int r = 0;
- bool gpu_reset_for_dev_remove = 0;
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
@@ -5276,10 +5265,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
- gpu_reset_for_dev_remove =
- test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
- test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
-
/*
* ASIC reset has to be done on all XGMI hive nodes ASAP
* to allow proper links negotiation in FW (within 1 sec)
@@ -5322,18 +5307,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
amdgpu_ras_intr_cleared();
}
- /* Since the mode1 reset affects base ip blocks, the
- * phase1 ip blocks need to be resumed. Otherwise there
- * will be a BIOS signature error and the psp bootloader
- * can't load kdb on the next amdgpu install.
- */
- if (gpu_reset_for_dev_remove) {
- list_for_each_entry(tmp_adev, device_list_handle, reset_list)
- amdgpu_device_ip_resume_phase1(tmp_adev);
-
- goto end;
- }
-
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
if (need_full_reset) {
/* post card */
@@ -5570,11 +5543,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
int i, r = 0;
bool need_emergency_restart = false;
bool audio_suspended = false;
- bool gpu_reset_for_dev_remove = false;
-
- gpu_reset_for_dev_remove =
- test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
- test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
/*
* Special case: RAS triggered and full reset isn't supported
@@ -5612,7 +5580,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
list_add_tail(&tmp_adev->reset_list, &device_list);
- if (gpu_reset_for_dev_remove && adev->shutdown)
+ if (adev->shutdown)
tmp_adev->shutdown = true;
}
if (!list_is_first(&adev->reset_list, &device_list))
@@ -5697,10 +5665,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- if (gpu_reset_for_dev_remove) {
- /* Workaroud for ASICs need to disable SMC first */
- amdgpu_device_smu_fini_early(tmp_adev);
- }
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
/*TODO Should we stop ?*/
if (r) {
@@ -5732,9 +5696,6 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
r = amdgpu_do_asic_reset(device_list_handle, reset_context);
if (r && r == -EAGAIN)
goto retry;
-
- if (!r && gpu_reset_for_dev_remove)
- goto recover_end;
}
skip_hw_reset:
@@ -5790,7 +5751,6 @@ skip_sched_resume:
amdgpu_ras_set_error_query_ready(tmp_adev, true);
}
-recover_end:
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 0431eafa86b5..c7d60dd0fb97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1963,8 +1963,6 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
break;
case IP_VERSION(9, 4, 3):
- if (!amdgpu_exp_hw_support)
- return -EINVAL;
amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
break;
case IP_VERSION(10, 1, 10):
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8b33b130ea36..cc69005f5b46 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -115,9 +115,10 @@
* 3.54.0 - Add AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS support
* - 3.55.0 - Add AMDGPU_INFO_GPUVM_FAULT query
* - 3.56.0 - Update IB start address and size alignment for decode and encode
+ * - 3.57.0 - Compute tunneling on GFX10+
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 56
+#define KMS_DRIVER_MINOR 57
#define KMS_DRIVER_PATCHLEVEL 0
/*
@@ -127,6 +128,7 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_VM = BIT(0),
AMDGPU_DEBUG_LARGEBAR = BIT(1),
AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
+ AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -208,6 +210,7 @@ int amdgpu_umsch_mm;
int amdgpu_seamless = -1; /* auto */
uint amdgpu_debug_mask;
int amdgpu_agp = -1; /* auto */
+int amdgpu_wbrf = -1;
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -971,6 +974,22 @@ module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);
MODULE_PARM_DESC(agp, "AGP (-1 = auto (default), 0 = disable, 1 = enable)");
module_param_named(agp, amdgpu_agp, int, 0444);
+/**
+ * DOC: wbrf (int)
+ * Enable Wifi RFI interference mitigation feature.
+ * Due to electrical and mechanical constraints, relatively high-powered harmonics of
+ * the (G-)DDR memory clocks are likely to interfere with the local radio module
+ * frequency bands used by Wifi 6/6e/7. With this feature enabled, PMFW selects either
+ * a "shadowed P-State" or a regular "P-State", based on the active list of in-use
+ * (to-be-avoided) frequencies, as part of the initial setting or a P-state transition.
+ * Note that enabling this feature may have a performance cost.
+ * (0 = disabled, 1 = enabled, -1 = auto (default setting, will be enabled if supported))
+ */
+MODULE_PARM_DESC(wbrf,
+ "Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
+module_param_named(wbrf, amdgpu_wbrf, int, 0444);
+
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
@@ -2099,6 +2118,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: soft reset for GPU recovery disabled\n");
adev->debug_disable_soft_recovery = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_USE_VRAM_FW_BUF) {
+ pr_info("debug: place fw in vram for frontdoor loading\n");
+ adev->debug_use_vram_fw_buf = true;
+ }
}
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2210,6 +2234,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, ddev);
+ amdgpu_init_debug_options(adev);
+
ret = amdgpu_driver_load_kms(adev, flags);
if (ret)
goto err_pci;
@@ -2290,8 +2316,6 @@ retry_init:
amdgpu_get_secondary_funcs(adev);
}
- amdgpu_init_debug_options(adev);
-
return 0;
err_pci:
@@ -2313,38 +2337,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(dev->dev);
}
- if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2) &&
- !amdgpu_sriov_vf(adev)) {
- bool need_to_reset_gpu = false;
-
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- struct amdgpu_hive_info *hive;
-
- hive = amdgpu_get_xgmi_hive(adev);
- if (hive->device_remove_count == 0)
- need_to_reset_gpu = true;
- hive->device_remove_count++;
- amdgpu_put_xgmi_hive(hive);
- } else {
- need_to_reset_gpu = true;
- }
-
- /* Workaround for ASICs need to reset SMU.
- * Called only when the first device is removed.
- */
- if (need_to_reset_gpu) {
- struct amdgpu_reset_context reset_context;
-
- adev->shutdown = true;
- memset(&reset_context, 0, sizeof(reset_context));
- reset_context.method = AMD_RESET_METHOD_NONE;
- reset_context.reset_req_dev = adev;
- set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- set_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context.flags);
- amdgpu_device_gpu_recover(adev, NULL, &reset_context);
- }
- }
-
amdgpu_driver_unload_kms(dev);
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 84beeaa4d21c..49a5f1c73b3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -203,7 +203,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_exec exec;
long r;
- drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
drm_exec_retry_on_contention(&exec);
@@ -739,7 +739,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
}
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
- DRM_EXEC_IGNORE_DUPLICATES);
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
if (gobj) {
r = drm_exec_lock_obj(&exec, gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index d2f273d77e59..55784a9f26c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -1045,21 +1045,28 @@ int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
* seconds, so here, we just pick up three parts for emulation.
*/
ret = memcmp(vram_ptr, cptr, 10);
- if (ret)
- return ret;
+ if (ret) {
+ ret = -EIO;
+ goto release_buffer;
+ }
ret = memcmp(vram_ptr + (size / 2), cptr, 10);
- if (ret)
- return ret;
+ if (ret) {
+ ret = -EIO;
+ goto release_buffer;
+ }
ret = memcmp(vram_ptr + size - 10, cptr, 10);
- if (ret)
- return ret;
+ if (ret) {
+ ret = -EIO;
+ goto release_buffer;
+ }
+release_buffer:
amdgpu_bo_free_kernel(&vram_bo, &vram_gpu,
&vram_ptr);
- return 0;
+ return ret;
}
static ssize_t current_memory_partition_show(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index 081267161d40..55b65fc04b65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -190,8 +190,8 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
pr_debug("hmm range: start = 0x%lx, end = 0x%lx",
hmm_range->start, hmm_range->end);
- /* Assuming 128MB takes maximum 1 second to fault page address */
- timeout = max((hmm_range->end - hmm_range->start) >> 27, 1UL);
+ /* Assuming 64MB takes maximum 1 second to fault page address */
+ timeout = max((hmm_range->end - hmm_range->start) >> 26, 1UL);
timeout *= HMM_RANGE_DEFAULT_TIMEOUT;
timeout = jiffies + msecs_to_jiffies(timeout);
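With HMM_RANGE_DEFAULT_TIMEOUT at 1000 ms, moving the shift from 27 to 26 halves
the assumed fault throughput and doubles the time budget. Worked arithmetic for a
1 GiB range:

    /* old: (1 GiB >> 27) =  8  ->  8 * 1000 ms =  8 s budget
     * new: (1 GiB >> 26) = 16  -> 16 * 1000 ms = 16 s budget
     * (ranges below 64 MiB are still clamped to at least 1 s)
     */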
@@ -199,6 +199,7 @@ retry:
hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
r = hmm_range_fault(hmm_range);
if (unlikely(r)) {
+ schedule();
/*
* FIXME: This timeout should encompass the retry from
* mmu_interval_read_retry() as well.
@@ -212,7 +213,6 @@ retry:
break;
hmm_range->hmm_pfns += MAX_WALK_BYTE >> PAGE_SHIFT;
hmm_range->start = hmm_range->end;
- schedule();
} while (hmm_range->end < end);
hmm_range->start = start;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 82608df43396..d79cb13e1aa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -175,7 +175,6 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.class = I2C_CLASS_DDC;
i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b5ebafd4a3ad..bf4f48fe438d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1105,7 +1105,12 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_GPU_AVG_POWER,
(void *)&ui32, &ui32_size)) {
- return -EINVAL;
+ /* fall back to input power for backwards compat */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
}
ui32 >>= 8;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 210aea590a52..59fafb8392e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -218,6 +218,7 @@ static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, st
int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data)
{
struct amdgpu_smuio_mcm_config_info mcm_info;
+ struct ras_err_addr err_addr = {0};
struct mca_bank_set mca_set;
struct mca_bank_node *node;
struct mca_bank_entry *entry;
@@ -246,10 +247,18 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo
mcm_info.socket_id = entry->info.socket_id;
mcm_info.die_id = entry->info.aid;
+ if (blk == AMDGPU_RAS_BLOCK__UMC) {
+ err_addr.err_status = entry->regs[MCA_REG_IDX_STATUS];
+ err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID];
+ err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR];
+ }
+
if (type == AMDGPU_MCA_ERROR_TYPE_UE)
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, (uint64_t)count);
+ amdgpu_ras_error_statistic_ue_count(err_data,
+ &mcm_info, &err_addr, (uint64_t)count);
else
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, (uint64_t)count);
+ amdgpu_ras_error_statistic_ce_count(err_data,
+ &mcm_info, &err_addr, (uint64_t)count);
}
out_mca_release:
@@ -351,6 +360,9 @@ int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_err
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
int count;
+ if (!mca_funcs || !mca_funcs->mca_get_mca_entry)
+ return -EOPNOTSUPP;
+
switch (type) {
case AMDGPU_MCA_ERROR_TYPE_UE:
count = mca_funcs->max_ue_count;
@@ -365,10 +377,7 @@ int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_err
if (idx >= count)
return -EINVAL;
- if (mca_funcs && mca_funcs->mca_get_mca_entry)
- return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
-
- return -EOPNOTSUPP;
+ return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index 2b488fcf2f95..b399f1b62887 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -46,6 +46,10 @@
#define MCA_REG__STATUS__ERRORCODEEXT(x) MCA_REG_FIELD(x, 21, 16)
#define MCA_REG__STATUS__ERRORCODE(x) MCA_REG_FIELD(x, 15, 0)
+#define MCA_REG__MISC0__ERRCNT(x) MCA_REG_FIELD(x, 43, 32)
+
+#define MCA_REG__SYND__ERRORINFORMATION(x) MCA_REG_FIELD(x, 17, 0)
+
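Assuming MCA_REG_FIELD(x, h, l) follows the usual shift-and-mask expansion used
for the accessors above, the new macros extract a 12-bit error count from MISC0
and an 18-bit error-information field from SYND. A usage sketch, where
MCA_REG_IDX_MISC0 and MCA_REG_IDX_SYND are assumed register indices:

    /* presumed expansion: ((x) >> (l)) & ((1ULL << ((h) - (l) + 1)) - 1) */
    uint32_t errcnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]);
    uint32_t info = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);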
enum amdgpu_mca_ip {
AMDGPU_MCA_IP_UNKNOW = -1,
AMDGPU_MCA_IP_PSP = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 9ddbf1494326..da48b6da0107 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -98,6 +98,26 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
return 0;
}
+static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mes.event_log_gpu_obj,
+ &adev->mes.event_log_gpu_addr,
+ &adev->mes.event_log_cpu_addr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create MES event log buffer (%d)\n", r);
+ return r;
+ }
+
+ memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);
+
+ return 0;
+}
+
static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
bitmap_free(adev->mes.doorbell_bitmap);
@@ -182,8 +202,14 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
if (r)
goto error;
+ r = amdgpu_mes_event_log_init(adev);
+ if (r)
+ goto error_doorbell;
+
return 0;
+error_doorbell:
+ amdgpu_mes_doorbell_free(adev);
error:
amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
@@ -199,6 +225,10 @@ error_ids:
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
+ amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
+ &adev->mes.event_log_gpu_addr,
+ &adev->mes.event_log_cpu_addr);
+
amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
@@ -886,6 +916,11 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
op_input.set_shader_debugger.process_context_addr = process_context_addr;
op_input.set_shader_debugger.flags.u32all = flags;
+
+ /* use amdgpu_mes_flush_shader_debugger instead */
+ if (op_input.set_shader_debugger.flags.process_ctx_flush)
+ return -EINVAL;
+
op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
sizeof(op_input.set_shader_debugger.tcp_watch_cntl));
@@ -905,6 +940,32 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
return r;
}
+int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
+ uint64_t process_context_addr)
+{
+ struct mes_misc_op_input op_input = {0};
+ int r;
+
+ if (!adev->mes.funcs->misc_op) {
+ DRM_ERROR("mes flush shader debugger is not supported!\n");
+ return -EINVAL;
+ }
+
+ op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
+ op_input.set_shader_debugger.process_context_addr = process_context_addr;
+ op_input.set_shader_debugger.flags.process_ctx_flush = true;
+
+ amdgpu_mes_lock(&adev->mes);
+
+ r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ if (r)
+ DRM_ERROR("failed to set_shader_debugger\n");
+
+ amdgpu_mes_unlock(&adev->mes);
+
+ return r;
+}
+
static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
@@ -1122,7 +1183,7 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
amdgpu_sync_create(&sync);
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = drm_exec_lock_obj(&exec,
&ctx_data->meta_data_obj->tbo.base);
@@ -1193,7 +1254,7 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
struct drm_exec exec;
long r;
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = drm_exec_lock_obj(&exec,
&ctx_data->meta_data_obj->tbo.base);
@@ -1479,3 +1540,34 @@ out:
amdgpu_ucode_release(&adev->mes.fw[pipe]);
return r;
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = m->private;
+ uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
+
+ seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
+ mem, PAGE_SIZE, false);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
+
+#endif
+
+void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_mes_event_log", 0444, root,
+ adev, &amdgpu_debugfs_mes_event_log_fops);
+
+#endif
+}
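DEFINE_SHOW_ATTRIBUTE(name) generates the name##_fops used above from name##_show
via single_open(), threading the debugfs inode's private data (here: adev) through
to the show callback. Roughly:

    static int amdgpu_debugfs_mes_event_log_open(struct inode *inode,
                                                 struct file *file)
    {
        return single_open(file, amdgpu_debugfs_mes_event_log_show,
                           inode->i_private);
    }

    static const struct file_operations amdgpu_debugfs_mes_event_log_fops = {
        .owner   = THIS_MODULE,
        .open    = amdgpu_debugfs_mes_event_log_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };

The log can then be dumped with a plain read of
/sys/kernel/debug/dri/<minor>/amdgpu_mes_event_log.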
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index a27b424ffe00..7d4f93fea937 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -133,6 +133,11 @@ struct amdgpu_mes {
uint32_t num_mes_dbs;
unsigned long *doorbell_bitmap;
+ /* MES event log buffer */
+ struct amdgpu_bo *event_log_gpu_obj;
+ uint64_t event_log_gpu_addr;
+ void *event_log_cpu_addr;
+
/* ip specific functions */
const struct amdgpu_mes_funcs *funcs;
};
@@ -291,9 +296,10 @@ struct mes_misc_op_input {
uint64_t process_context_addr;
union {
struct {
- uint64_t single_memop : 1;
- uint64_t single_alu_op : 1;
- uint64_t reserved: 30;
+ uint32_t single_memop : 1;
+ uint32_t single_alu_op : 1;
+ uint32_t reserved: 29;
+ uint32_t process_ctx_flush: 1;
};
uint32_t u32all;
} flags;
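The switch from uint64_t to uint32_t bitfields (with reserved shrinking to 29)
makes the four fields total exactly 32 bits, so they pack into the same four bytes
that u32all views through the union; with a 64-bit base type the fields occupied
an eight-byte storage unit that the 32-bit view could not cover. A sketch of the
resulting layout on the little-endian targets amdgpu runs on (first-declared field
in the lowest bits):

    union {
        struct {
            uint32_t single_memop : 1;       /* bit 0 */
            uint32_t single_alu_op : 1;      /* bit 1 */
            uint32_t reserved : 29;          /* bits 2..30 */
            uint32_t process_ctx_flush : 1;  /* bit 31 */
        };
        uint32_t u32all;                     /* same four bytes */
    } flags = { .u32all = 0 };

    flags.process_ctx_flush = 1;             /* u32all == 0x80000000 */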
@@ -369,7 +375,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
const uint32_t *tcp_watch_cntl,
uint32_t flags,
bool trap_en);
-
+int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
+ uint64_t process_context_addr);
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
int queue_type, int idx,
struct amdgpu_mes_ctx_data *ctx_data,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 32fe05c810c6..2e4911050cc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -32,7 +32,6 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fixed.h>
#include <drm/drm_framebuffer.h>
@@ -51,6 +50,7 @@ struct amdgpu_device;
struct amdgpu_encoder;
struct amdgpu_router;
struct amdgpu_hpd;
+struct edid;
#define to_amdgpu_crtc(x) container_of(x, struct amdgpu_crtc, base)
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
@@ -343,6 +343,97 @@ struct amdgpu_mode_info {
int disp_priority;
const struct amdgpu_display_funcs *funcs;
const enum drm_plane_type *plane_type;
+
+ /* Driver-private color mgmt props */
+
+ /**
+ * @plane_degamma_lut_property: Plane property to set a degamma LUT to
+ * convert encoded values to light linear values before sampling or
+ * blending.
+ */
+ struct drm_property *plane_degamma_lut_property;
+ /**
+ * @plane_degamma_lut_size_property: Plane property to define the max
+ * size of degamma LUT as supported by the driver (read-only).
+ */
+ struct drm_property *plane_degamma_lut_size_property;
+ /**
+ * @plane_degamma_tf_property: Plane pre-defined transfer function to
+ * go from scanout/encoded values to linear values.
+ */
+ struct drm_property *plane_degamma_tf_property;
+ /**
+ * @plane_hdr_mult_property: Plane property to set an HDR multiplier,
+ * applied to plane pixel values ahead of blending.
+ */
+ struct drm_property *plane_hdr_mult_property;
+
+ struct drm_property *plane_ctm_property;
+ /**
+ * @plane_shaper_lut_property: Plane property to set pre-blending shaper LUT
+ * that converts color content before 3D LUT. If
+ * plane_shaper_tf_property != Identity TF, AMD color module will
+ * combine the user LUT values with pre-defined TF into the LUT
+ * parameters to be programmed.
+ */
+ struct drm_property *plane_shaper_lut_property;
+ /**
+ * @plane_shaper_lut_size_property: Plane property for the size of
+ * pre-blending shaper LUT as supported by the driver (read-only).
+ */
+ struct drm_property *plane_shaper_lut_size_property;
+ /**
+ * @plane_shaper_tf_property: Plane property to set a predefined
+ * transfer function for pre-blending shaper (before applying 3D LUT)
+ * with or without LUT. There is no shaper ROM, but we can use AMD
+ * color modules to program LUT parameters from predefined TF (or
+ * from a combination of pre-defined TF and the custom 1D LUT).
+ */
+ struct drm_property *plane_shaper_tf_property;
+ /**
+ * @plane_lut3d_property: Plane property for color transformation using
+ * a 3D LUT (pre-blending), a three-dimensional array where each
+ * element is an RGB triplet. Each dimension has the size of
+ * lut3d_size. The array contains samples from the approximated
+ * function. On AMD, values between samples are estimated by
+ * tetrahedral interpolation. The array is accessed with three indices,
+ * one for each input dimension (color channel), blue being the
+ * outermost dimension, red the innermost.
+ */
+ struct drm_property *plane_lut3d_property;
+ /**
+ * @plane_lut3d_size_property: Plane property to define the max
+ * size of 3D LUT as supported by the driver (read-only). The max size
+ * is the max size of one dimension and, therefore, the max number of
+ * entries for the 3D LUT array is the 3D LUT size cubed.
+ */
+ struct drm_property *plane_lut3d_size_property;
+ /**
+ * @plane_blend_lut_property: Plane property for output gamma before
+ * blending. Userspace sets a blend LUT to convert colors after 3D LUT
+ * conversion. It works as a post-3DLUT 1D LUT. Together with the shaper
+ * LUT, it sandwiches the 3D LUT between two 1D LUTs. If plane_blend_tf_property
+ * != Identity TF, AMD color module will combine the user LUT values
+ * with pre-defined TF into the LUT parameters to be programmed.
+ */
+ struct drm_property *plane_blend_lut_property;
+ /**
+ * @plane_blend_lut_size_property: Plane property to define the max
+ * size of blend LUT as supported by the driver (read-only).
+ */
+ struct drm_property *plane_blend_lut_size_property;
+ /**
+ * @plane_blend_tf_property: Plane property to set a predefined
+ * transfer function for pre-blending blend/out_gamma (after applying
+ * 3D LUT) with or without LUT. There is no blend ROM, but we can use
+ * AMD color modules to program LUT parameters from predefined TF (or
+ * from a combination of pre-defined TF and the custom 1D LUT).
+ */
+ struct drm_property *plane_blend_tf_property;
+ /**
+ * @regamma_tf_property: Transfer function for CRTC regamma
+ * (post-blending). Possible values are defined by `enum
+ * amdgpu_transfer_function`. There is no regamma ROM, but we can use
+ * AMD color modules to program LUT parameters from predefined TF (or
+ * from a combination of pre-defined TF and the custom 1D LUT).
+ */
+ struct drm_property *regamma_tf_property;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -416,6 +507,10 @@ struct amdgpu_crtc {
int otg_inst;
struct drm_pending_vblank_event *event;
+
+ bool wb_pending;
+ bool wb_enabled;
+ struct drm_writeback_connector *wb_conn;
};
struct amdgpu_encoder_atom_dig {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index d79b4ca1ecfc..425cebcc5cbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1245,19 +1245,15 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
* amdgpu_bo_move_notify - notification about a memory move
* @bo: pointer to a buffer object
* @evict: if this move is evicting the buffer from the graphics address space
- * @new_mem: new information of the bufer object
*
* Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
* bookkeeping.
* TTM driver callback which is called when ttm moves a buffer.
*/
-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
- bool evict,
- struct ttm_resource *new_mem)
+void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
- struct ttm_resource *old_mem = bo->resource;
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
@@ -1274,13 +1270,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
/* remember the eviction */
if (evict)
atomic64_inc(&adev->num_evictions);
-
- /* update statistics */
- if (!new_mem)
- return;
-
- /* move_notify is called before move happens */
- trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
@@ -1343,6 +1332,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
abo = ttm_to_amdgpu_bo(bo);
+ WARN_ON(abo->vm_bo);
+
if (abo->kfd_bo)
amdgpu_amdkfd_release_notify(abo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index d28e21baef16..a3ea8a82db23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -344,9 +344,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
size_t buffer_size, uint32_t *metadata_size,
uint64_t *flags);
-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
- bool evict,
- struct ttm_resource *new_mem);
+void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index a21045d018f2..0328616473f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -466,7 +466,7 @@ static int psp_sw_init(void *handle)
}
ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
- amdgpu_sriov_vf(adev) ?
+ (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
&psp->fw_pri_bo,
&psp->fw_pri_mc_addr,
@@ -1433,8 +1433,8 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
get_extended_data) ||
amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
IP_VERSION(13, 0, 6);
- bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps &
- EXTEND_PEER_LINK_INFO_CMD_FLAG;
+ bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
+ psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
/* populate the shared output buffer rather than the cmd input buffer
* with node_ids as the input for GET_PEER_LINKS command execution.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 72634d675e27..31823a30dea2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -28,6 +28,7 @@
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
+#include <linux/list_sort.h>
#include "amdgpu.h"
#include "amdgpu_ras.h"
@@ -304,11 +305,13 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
return -EINVAL;
data->head.block = block_id;
- /* only ue and ce errors are supported */
+ /* only ue, ce and poison errors are supported */
if (!memcmp("ue", err, 2))
data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
else if (!memcmp("ce", err, 2))
data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
+ else if (!memcmp("poison", err, 6))
+ data->head.type = AMDGPU_RAS_ERROR__POISON;
else
return -EINVAL;
@@ -430,9 +433,10 @@ static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
* The block is one of: umc, sdma, gfx, etc.
* see ras_block_string[] for details
*
- * The error type is one of: ue, ce, where,
+ * The error type is one of: ue, ce and poison, where
* ue is multi-uncorrectable
* ce is single-correctable
+ * poison is a data-poisoning error
*
* The sub-block is a the sub-block index, pass 0 if there is no sub-block.
* The address and value are hexadecimal numbers, leading 0x is optional.
@@ -1066,8 +1070,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
mcm_info = &err_info->mcm_info;
if (err_info->ce_count) {
dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld new correctable hardware errors detected in %s block, "
- "no user action is needed\n",
+ "%lld new correctable hardware errors detected in %s block\n",
mcm_info->socket_id,
mcm_info->die_id,
err_info->ce_count,
@@ -1079,8 +1082,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld correctable hardware errors detected in total in %s block, "
- "no user action is needed\n",
+ "%lld correctable hardware errors detected in total in %s block\n",
mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name);
}
}
@@ -1107,16 +1109,14 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
adev->smuio.funcs->get_die_id) {
dev_info(adev->dev, "socket: %d, die: %d "
"%ld correctable hardware errors "
- "detected in %s block, no user "
- "action is needed.\n",
+ "detected in %s block\n",
adev->smuio.funcs->get_socket_id(adev),
adev->smuio.funcs->get_die_id(adev),
ras_mgr->err_data.ce_count,
blk_name);
} else {
dev_info(adev->dev, "%ld correctable hardware errors "
- "detected in %s block, no user "
- "action is needed.\n",
+ "detected in %s block\n",
ras_mgr->err_data.ce_count,
blk_name);
}
@@ -1155,8 +1155,10 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
- amdgpu_ras_error_statistic_ce_count(&obj->err_data, &err_info->mcm_info, err_info->ce_count);
- amdgpu_ras_error_statistic_ue_count(&obj->err_data, &err_info->mcm_info, err_info->ue_count);
+ amdgpu_ras_error_statistic_ce_count(&obj->err_data,
+ &err_info->mcm_info, NULL, err_info->ce_count);
+ amdgpu_ras_error_statistic_ue_count(&obj->err_data,
+ &err_info->mcm_info, NULL, err_info->ue_count);
}
} else {
/* for legacy asic path which doesn't have error source info */
@@ -1173,6 +1175,9 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
struct amdgpu_ras_block_object *block_obj = NULL;
+ if (blk == AMDGPU_RAS_BLOCK_COUNT)
+ return -EINVAL;
+
if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
return -EINVAL;
@@ -1914,7 +1919,7 @@ static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj
struct amdgpu_iv_entry *entry)
{
dev_info(obj->adev->dev,
- "Poison is created, no user action is needed.\n");
+ "Poison is created\n");
}
static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
@@ -2537,7 +2542,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
return 0;
data = &con->eh_data;
- *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
+ *data = kzalloc(sizeof(**data), GFP_KERNEL);
if (!*data) {
ret = -ENOMEM;
goto out;
@@ -2824,10 +2829,10 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
if (con)
return 0;
- con = kmalloc(sizeof(struct amdgpu_ras) +
+ con = kzalloc(sizeof(*con) +
sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
- GFP_KERNEL|__GFP_ZERO);
+ GFP_KERNEL);
if (!con)
return -ENOMEM;
@@ -2914,6 +2919,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
amdgpu_ras_query_poison_mode(adev);
+ /* Packed socket_id to ras feature mask bits[31:29] */
+ if (adev->smuio.funcs &&
+ adev->smuio.funcs->get_socket_id)
+ con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << 29);
+
/* Get RAS schema for particular SOC */
con->schema = amdgpu_get_ras_schema(adev);
@@ -3132,8 +3142,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
- /* enable MCA debug on APU device */
- amdgpu_ras_set_mca_debug_mode(adev, !!(adev->flags & AMD_IS_APU));
+ amdgpu_ras_set_mca_debug_mode(adev, false);
list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
if (!node->ras_obj) {
@@ -3674,8 +3683,24 @@ static struct ras_err_node *amdgpu_ras_error_node_new(void)
return err_node;
}
+static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
+{
+ struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
+ struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
+ struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
+ struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
+
+ if (unlikely(infoa->socket_id != infob->socket_id))
+ return infoa->socket_id - infob->socket_id;
+
+ return infoa->die_id - infob->die_id;
+}
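list_sort() takes a memcmp-style comparator: a negative return keeps a before b,
a positive return swaps them, and the sort is stable for equal keys. The
subtraction above cannot overflow because socket and die IDs are small
non-negative values. Re-sorting after each insertion, as done below, keeps the
error records ordered by socket first, then die:

    /* nodes end up ordered by (socket_id, die_id), ascending */
    list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);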
+
static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ struct ras_err_addr *err_addr)
{
struct ras_err_node *err_node;
@@ -3689,14 +3714,19 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
+ if (err_addr)
+ memcpy(&err_node->err_info.err_addr, err_addr, sizeof(*err_addr));
+
err_data->err_list_count++;
list_add_tail(&err_node->node, &err_data->err_node_list);
+ list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
return &err_node->err_info;
}
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ struct ras_err_addr *err_addr, u64 count)
{
struct ras_err_info *err_info;
@@ -3706,7 +3736,7 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
if (!count)
return 0;
- err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
+ err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
if (!err_info)
return -EINVAL;
@@ -3717,7 +3747,8 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
}
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ struct ras_err_addr *err_addr, u64 count)
{
struct ras_err_info *err_info;
@@ -3727,7 +3758,7 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
if (!count)
return 0;
- err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
+ err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
if (!err_info)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 6a941eb8fb8f..76fb85628716 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -452,10 +452,17 @@ struct ras_fs_data {
char debugfs_name[32];
};
+struct ras_err_addr {
+ uint64_t err_status;
+ uint64_t err_ipid;
+ uint64_t err_addr;
+};
+
struct ras_err_info {
struct amdgpu_smuio_mcm_config_info mcm_info;
u64 ce_count;
u64 ue_count;
+ struct ras_err_addr err_addr;
};
struct ras_err_node {
@@ -806,8 +813,10 @@ void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
int amdgpu_ras_error_data_init(struct ras_err_data *err_data);
void amdgpu_ras_error_data_fini(struct ras_err_data *err_data);
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count);
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ struct ras_err_addr *err_addr, u64 count);
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count);
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ struct ras_err_addr *err_addr, u64 count);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index b0335a1c5e90..19899f6b9b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -32,7 +32,6 @@ enum AMDGPU_RESET_FLAGS {
AMDGPU_NEED_FULL_RESET = 0,
AMDGPU_SKIP_HW_RESET = 1,
- AMDGPU_RESET_FOR_DEVICE_REMOVE = 2,
};
struct amdgpu_reset_context {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 6a80d3ec887e..45424ebf9681 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -642,6 +642,10 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
struct amdgpu_mqd_prop *prop)
{
struct amdgpu_device *adev = ring->adev;
+ bool is_high_prio_compute = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
+ amdgpu_gfx_is_high_priority_compute_queue(adev, ring);
+ bool is_high_prio_gfx = ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
+ amdgpu_gfx_is_high_priority_graphics_queue(adev, ring);
memset(prop, 0, sizeof(*prop));
@@ -659,10 +663,8 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
*/
prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;
- if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
- amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) ||
- (ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
- amdgpu_gfx_is_high_priority_graphics_queue(adev, ring))) {
+ prop->allow_tunneling = is_high_prio_compute;
+ if (is_high_prio_compute || is_high_prio_gfx) {
prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 35e0ae9acadc..2c3675d91614 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -531,13 +531,12 @@ int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
if (version_major == 2 && version_minor == 1)
adev->gfx.rlc.is_rlc_v2_1 = true;
- if (version_minor >= 0) {
- err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
- if (err) {
- dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
- return err;
- }
+ err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
+ if (err) {
+ dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
+ return err;
}
+
if (version_minor >= 1)
amdgpu_gfx_rlc_init_microcode_v2_1(adev);
if (version_minor >= 2)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index f3de02193138..7a6a67275404 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -61,7 +61,7 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!bo)
return -EINVAL;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
@@ -122,7 +122,7 @@ void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
vm = &fpriv->vm;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index dcd8c066bc1f..1b013a44ca99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -191,7 +191,8 @@ static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
/* Never sync to VM updates either. */
if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
- owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+ owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
+ owner != AMDGPU_FENCE_OWNER_KFD)
return false;
/* Ignore fences depending on the sync mode */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ab4a762aed5b..75c9fd2c6c2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -545,10 +545,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
}
+ trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
out:
/* update statistics */
atomic64_add(bo->base.size, &adev->num_bytes_moved);
- amdgpu_bo_move_notify(bo, evict, new_mem);
+ amdgpu_bo_move_notify(bo, evict);
return 0;
}
@@ -1553,7 +1554,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
- amdgpu_bo_move_notify(bo, false, NULL);
+ amdgpu_bo_move_notify(bo, false);
}
static struct ttm_device_funcs amdgpu_bo_driver = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index b14127429f30..3e12763e477a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -1062,7 +1062,8 @@ int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
{
if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
- amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
&adev->firmware.fw_buf,
&adev->firmware.fw_buf_mc,
&adev->firmware.fw_buf_ptr);
@@ -1397,9 +1398,13 @@ int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
if (err)
return -ENODEV;
+
err = amdgpu_ucode_validate(*fw);
- if (err)
+ if (err) {
dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
+ release_firmware(*fw);
+ *fw = NULL;
+ }
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index ca45ba8ac171..bfbf59326ee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -86,7 +86,7 @@ static int map_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
amdgpu_sync_create(&sync);
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = drm_exec_lock_obj(&exec, &bo->tbo.base);
drm_exec_retry_on_contention(&exec);
@@ -149,7 +149,7 @@ static int unmap_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
long r;
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = drm_exec_lock_obj(&exec, &bo->tbo.base);
drm_exec_retry_on_contention(&exec);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 3a632c3b1a2c..0dcff2889e25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -1099,7 +1099,8 @@ bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev)
{
bool xnack_mode = true;
- if (amdgpu_sriov_vf(adev) && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ if (amdgpu_sriov_vf(adev) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
xnack_mode = false;
return xnack_mode;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index db6fc0cb18eb..453a4b786cfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7da71b6a9dc6..b8fcb6c55698 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -285,6 +285,7 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
+ vm_bo->moved = true;
if (!bo || bo->tbo.type != ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
else if (bo->parent)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index b6cd565562ad..4740dd65b99d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -116,7 +116,7 @@ struct amdgpu_mem_stats;
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
-/* Reserve 4MB VRAM for page tables */
+/* How much VRAM should be reserved for page tables */
#define AMDGPU_VM_RESERVED_VRAM (8ULL << 20)
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index a2287bb25223..a160265ddc07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -642,13 +642,14 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
if (!entry->bo)
return;
+
+ entry->bo->vm_bo = NULL;
shadow = amdgpu_bo_shadowed(entry->bo);
if (shadow) {
ttm_bo_set_bulk_move(&shadow->tbo, NULL);
amdgpu_bo_unref(&shadow);
}
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
- entry->bo->vm_bo = NULL;
spin_lock(&entry->vm->status_lock);
list_del(&entry->vm_status);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index e81579708e96..b9a15d51eb5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_vpe.h"
+#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "vpe_v6_1.h"
@@ -33,8 +34,180 @@
/* VPE CSA resides in the 4th page of CSA */
#define AMDGPU_CSA_VPE_OFFSET (4096 * 3)
+/* 1 second timeout */
+#define VPE_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+#define VPE_MAX_DPM_LEVEL 4
+#define FIXED1_8_BITS_PER_FRACTIONAL_PART 8
+#define GET_PRATIO_INTEGER_PART(x) ((x) >> FIXED1_8_BITS_PER_FRACTIONAL_PART)
+
static void vpe_set_ring_funcs(struct amdgpu_device *adev);
+static inline uint16_t div16_u16_rem(uint16_t dividend, uint16_t divisor, uint16_t *remainder)
+{
+ *remainder = dividend % divisor;
+ return dividend / divisor;
+}
+
+static inline uint16_t complete_integer_division_u16(
+ uint16_t dividend,
+ uint16_t divisor,
+ uint16_t *remainder)
+{
+ return div16_u16_rem(dividend, divisor, remainder);
+}
+
+static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator)
+{
+ u16 arg1_value = numerator;
+ u16 arg2_value = denominator;
+
+ uint16_t remainder;
+
+ /* determine integer part */
+ uint16_t res_value = complete_integer_division_u16(
+ arg1_value, arg2_value, &remainder);
+
+ if (res_value > 127 /* CHAR_MAX */)
+ return 0;
+
+ /* determine fractional part */
+ {
+ unsigned int i = FIXED1_8_BITS_PER_FRACTIONAL_PART;
+
+ do {
+ remainder <<= 1;
+
+ res_value <<= 1;
+
+ if (remainder >= arg2_value) {
+ res_value |= 1;
+ remainder -= arg2_value;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ uint16_t summand = (remainder << 1) >= arg2_value;
+
+ if ((res_value + summand) > 32767 /* SHRT_MAX */)
+ return 0;
+
+ res_value += summand;
+ }
+
+ return res_value;
+}
+
+static uint16_t vpe_internal_get_pratio(uint16_t from_frequency, uint16_t to_frequency)
+{
+ uint16_t pratio = vpe_u1_8_from_fraction(from_frequency, to_frequency);
+
+ if (GET_PRATIO_INTEGER_PART(pratio) > 1)
+ pratio = 0;
+
+ return pratio;
+}
+
+/*
+ * VPE has 4 DPM levels from level 0 (lowest) to 3 (highest),
+ * VPE FW will dynamically decide which level should be used according to the current load.
+ *
+ * Get VPE and SOC clocks from the PM, select the four appropriate clock values,
+ * and calculate the ratios for adjusting from one clock to another.
+ * The VPE FW can then request the appropriate frequency from the PMFW.
+ */
+int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe)
+{
+ struct amdgpu_device *adev = vpe->ring.adev;
+ uint32_t dpm_ctl;
+
+ if (adev->pm.dpm_enabled) {
+ struct dpm_clocks clock_table = { 0 };
+ struct dpm_clock *VPEClks;
+ struct dpm_clock *SOCClks;
+ uint32_t idx;
+ uint32_t pratio_vmax_vnorm = 0, pratio_vnorm_vmid = 0, pratio_vmid_vmin = 0;
+ uint16_t pratio_vmin_freq = 0, pratio_vmid_freq = 0, pratio_vnorm_freq = 0, pratio_vmax_freq = 0;
+
+ dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
+ dpm_ctl |= 1; /* DPM enablement */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
+
+ /* Get VPECLK and SOCCLK */
+ if (amdgpu_dpm_get_dpm_clock_table(adev, &clock_table)) {
+ dev_dbg(adev->dev, "%s: get clock failed!\n", __func__);
+ goto disable_dpm;
+ }
+
+ SOCClks = clock_table.SocClocks;
+ VPEClks = clock_table.VPEClocks;
+
+ /* vpe dpm only cares about 4 levels. */
+ for (idx = 0; idx < VPE_MAX_DPM_LEVEL; idx++) {
+ uint32_t soc_dpm_level;
+ uint32_t min_freq;
+
+ if (idx == 0)
+ soc_dpm_level = 0;
+ else
+ soc_dpm_level = (idx * 2) + 1;
+
+ /* clamp the max level */
+ if (soc_dpm_level > PP_SMU_NUM_VPECLK_DPM_LEVELS - 1)
+ soc_dpm_level = PP_SMU_NUM_VPECLK_DPM_LEVELS - 1;
+
+ min_freq = (SOCClks[soc_dpm_level].Freq < VPEClks[soc_dpm_level].Freq) ?
+ SOCClks[soc_dpm_level].Freq : VPEClks[soc_dpm_level].Freq;
+
+ switch (idx) {
+ case 0:
+ pratio_vmin_freq = min_freq;
+ break;
+ case 1:
+ pratio_vmid_freq = min_freq;
+ break;
+ case 2:
+ pratio_vnorm_freq = min_freq;
+ break;
+ case 3:
+ pratio_vmax_freq = min_freq;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (pratio_vmin_freq && pratio_vmid_freq && pratio_vnorm_freq && pratio_vmax_freq) {
+ uint32_t pratio_ctl;
+
+ pratio_vmax_vnorm = (uint32_t)vpe_internal_get_pratio(pratio_vmax_freq, pratio_vnorm_freq);
+ pratio_vnorm_vmid = (uint32_t)vpe_internal_get_pratio(pratio_vnorm_freq, pratio_vmid_freq);
+ pratio_vmid_vmin = (uint32_t)vpe_internal_get_pratio(pratio_vmid_freq, pratio_vmin_freq);
+
+ pratio_ctl = pratio_vmax_vnorm | (pratio_vnorm_vmid << 9) | (pratio_vmid_vmin << 18);
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_pratio), pratio_ctl); /* PRatio */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_interval), 24000); /* 1ms, unit=1/24MHz */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_decision_threshold), 1200000); /* 50ms */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_busy_clamp_threshold), 1200000);/* 50ms */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_idle_clamp_threshold), 1200000);/* 50ms */
+ dev_dbg(adev->dev, "%s: configure vpe dpm pratio done!\n", __func__);
+ } else {
+ dev_dbg(adev->dev, "%s: invalid pratio parameters!\n", __func__);
+ goto disable_dpm;
+ }
+ }
+ return 0;
+
+disable_dpm:
+ dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
+ dpm_ctl &= 0xfffffffe; /* Disable DPM */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
+ dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__);
+ return 0;
+}
+
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)
{
struct amdgpu_firmware_info ucode = {
@@ -134,6 +307,19 @@ static int vpe_early_init(void *handle)
return 0;
}
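+/* Power gate VPE when no emitted fences remain outstanding; otherwise
+ * re-arm the idle timer.
+ */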
+static void vpe_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, vpe.idle_work.work);
+ unsigned int fences = 0;
+
+ fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
+
+ if (fences == 0)
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+ else
+ schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+}
static int vpe_common_init(struct amdgpu_vpe *vpe)
{
@@ -150,6 +336,9 @@ static int vpe_common_init(struct amdgpu_vpe *vpe)
return r;
}
+ vpe->context_started = false;
+ INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler);
+
return 0;
}
@@ -219,6 +408,9 @@ static int vpe_hw_fini(void *handle)
vpe_ring_stop(vpe);
+ /* Power off VPE */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+
return 0;
}
@@ -226,6 +418,8 @@ static int vpe_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vpe.idle_work);
+
return vpe_hw_fini(adev);
}
@@ -430,6 +624,21 @@ static int vpe_set_clockgating_state(void *handle,
static int vpe_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ if (!adev->pm.dpm_enabled)
+ dev_err(adev->dev, "Without PM, cannot support powergating\n");
+
+ dev_dbg(adev->dev, "%s: %s!\n", __func__, (state == AMD_PG_STATE_GATE) ? "GATE":"UNGATE");
+
+ if (state == AMD_PG_STATE_GATE) {
+ amdgpu_dpm_enable_vpe(adev, false);
+ vpe->context_started = false;
+ } else {
+ amdgpu_dpm_enable_vpe(adev, true);
+ }
+
return 0;
}
@@ -595,6 +804,38 @@ err0:
return ret;
}
+static void vpe_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ cancel_delayed_work_sync(&adev->vpe.idle_work);
+
+ /* Power on VPE and notify VPE of new context */
+ if (!vpe->context_started) {
+ uint32_t context_notify;
+
+ /* Power on VPE */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_UNGATE);
+
+ /* Toggle the indicator bit to tell the FW that a job from a new context has been submitted. */
+ context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator));
+ if ((context_notify & 0x1) == 0)
+ context_notify |= 0x1;
+ else
+ context_notify &= ~(0x1);
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify);
+ vpe->context_started = true;
+ }
+}
+
+static void vpe_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+}
+
static const struct amdgpu_ring_funcs vpe_ring_funcs = {
.type = AMDGPU_RING_TYPE_VPE,
.align_mask = 0xf,
@@ -625,6 +866,8 @@ static const struct amdgpu_ring_funcs vpe_ring_funcs = {
.init_cond_exec = vpe_ring_init_cond_exec,
.patch_cond_exec = vpe_ring_patch_cond_exec,
.preempt_ib = vpe_ring_preempt_ib,
+ .begin_use = vpe_ring_begin_use,
+ .end_use = vpe_ring_end_use,
};
static void vpe_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
index 29d56f7ae4a9..1153ddaea64d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
@@ -47,6 +47,15 @@ struct vpe_regs {
uint32_t queue0_rb_wptr_lo;
uint32_t queue0_rb_wptr_hi;
uint32_t queue0_preempt;
+
+ uint32_t dpm_enable;
+ uint32_t dpm_pratio;
+ uint32_t dpm_request_interval;
+ uint32_t dpm_decision_threshold;
+ uint32_t dpm_busy_clamp_threshold;
+ uint32_t dpm_idle_clamp_threshold;
+ uint32_t dpm_request_lv;
+ uint32_t context_indicator;
};
struct amdgpu_vpe {
@@ -63,12 +72,15 @@ struct amdgpu_vpe {
struct amdgpu_bo *cmdbuf_obj;
uint64_t cmdbuf_gpu_addr;
uint32_t *cmdbuf_cpu_addr;
+ struct delayed_work idle_work;
+ bool context_started;
};
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev);
int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe);
int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe);
int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe);
+int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe);
#define vpe_ring_init(vpe) ((vpe)->funcs->ring_init ? (vpe)->funcs->ring_init((vpe)) : 0)
#define vpe_ring_start(vpe) ((vpe)->funcs->ring_start ? (vpe)->funcs->ring_start((vpe)) : 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 44d8c1a11e1b..a6c88f2fe6e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -823,6 +823,28 @@ static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_inf
return 0;
}
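+/* Copy the link info that the local device's topology table already holds
+ * about a peer into that peer's table, avoiding another PSP topology query.
+ */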
+static void amdgpu_xgmi_fill_topology_info(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev)
+{
+ struct psp_xgmi_topology_info *top_info = &adev->psp.xgmi_context.top_info;
+ struct psp_xgmi_topology_info *peer_info = &peer_adev->psp.xgmi_context.top_info;
+
+ for (int i = 0; i < peer_info->num_nodes; i++) {
+ if (peer_info->nodes[i].node_id == adev->gmc.xgmi.node_id) {
+ for (int j = 0; j < top_info->num_nodes; j++) {
+ if (top_info->nodes[j].node_id == peer_adev->gmc.xgmi.node_id) {
+ peer_info->nodes[i].num_hops = top_info->nodes[j].num_hops;
+ peer_info->nodes[i].is_sharing_enabled =
+ top_info->nodes[j].is_sharing_enabled;
+ peer_info->nodes[i].num_links =
+ top_info->nodes[j].num_links;
+ return;
+ }
+ }
+ }
+ }
+}
+
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
struct psp_xgmi_topology_info *top_info;
@@ -897,18 +919,38 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
goto exit_unlock;
}
- /* get latest topology info for each device from psp */
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
- &tmp_adev->psp.xgmi_context.top_info, false);
+ if (amdgpu_sriov_vf(adev) &&
+ adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) {
+ /* only get the topology for the VF being initialized if it supports full duplex */
+ ret = psp_xgmi_get_topology_info(&adev->psp, count,
+ &adev->psp.xgmi_context.top_info, false);
if (ret) {
- dev_err(tmp_adev->dev,
+ dev_err(adev->dev,
"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
- tmp_adev->gmc.xgmi.node_id,
- tmp_adev->gmc.xgmi.hive_id, ret);
- /* To do : continue with some node failed or disable the whole hive */
+ adev->gmc.xgmi.node_id,
+ adev->gmc.xgmi.hive_id, ret);
+ /* TODO: continue when a node fails, or disable the whole hive */
goto exit_unlock;
}
+
+ /* fill the topology info for peers instead of getting from PSP */
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ amdgpu_xgmi_fill_topology_info(adev, tmp_adev);
+ }
+ } else {
+ /* get latest topology info for each device from psp */
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
+ &tmp_adev->psp.xgmi_context.top_info, false);
+ if (ret) {
+ dev_err(tmp_adev->dev,
+ "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
+ tmp_adev->gmc.xgmi.node_id,
+ tmp_adev->gmc.xgmi.hive_id, ret);
+ /* TODO: continue when a node fails, or disable the whole hive */
+ goto exit_unlock;
+ }
+ }
}
/* get topology again for hives that support extended data */
@@ -1271,10 +1313,10 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a
switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
case AMDGPU_MCA_ERROR_TYPE_UE:
- amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL);
+ amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL);
break;
case AMDGPU_MCA_ERROR_TYPE_CE:
- amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL);
+ amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 6cab882e8061..1592c63b3099 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -43,7 +43,6 @@ struct amdgpu_hive_info {
} pstate;
struct amdgpu_reset_domain *reset_domain;
- uint32_t device_remove_count;
atomic_t ras_recovery;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index a00b8c6f0a94..d6f808acfb17 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -848,6 +848,198 @@ static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
return xgmi_reg_state->common_header.structure_size;
}
+#define smnreg_0x11C00070 0x11C00070
+#define smnreg_0x11C00210 0x11C00210
+
+static struct aqua_reg_list wafl_reg_addrs[] = {
+ { smnreg_0x11C00070, 4, DW_ADDR_INCR },
+ { smnreg_0x11C00210, 1, 0 },
+};
+
+#define WAFL_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))
+
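+/* per WAFL instance: 4 regs starting at smnreg_0x11C00070 plus 1 at
+ * smnreg_0x11C00210
+ */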
+#define NUM_WAFL_SMN_REGS 5
+
+static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
+ void *buf, size_t max_size)
+{
+ struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
+ uint32_t start_addr, incrx, num_regs, szbuf;
+ struct amdgpu_regs_wafl_v1_0 *wafl_regs;
+ struct amdgpu_smn_reg_data *reg_data;
+ const int max_wafl_instances = 8;
+ int inst = 0, i, j, r, n;
+ const int wafl_inst = 2;
+ void *p;
+
+ if (!buf || !max_size)
+ return -EINVAL;
+
+ wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;
+
+ szbuf = sizeof(*wafl_reg_state) +
+ amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
+ NUM_WAFL_SMN_REGS);
+
+ if (max_size < szbuf)
+ return -EOVERFLOW;
+
+ p = &wafl_reg_state->wafl_state_regs[0];
+ for_each_inst(i, adev->aid_mask) {
+ for (j = 0; j < wafl_inst; ++j) {
+ wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
+ wafl_regs->inst_header.instance = inst++;
+
+ wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
+ wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;
+
+ reg_data = wafl_regs->smn_reg_values;
+
+ for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
+ start_addr = wafl_reg_addrs[r].start_addr;
+ incrx = wafl_reg_addrs[r].incrx;
+ num_regs = wafl_reg_addrs[r].num_regs;
+ for (n = 0; n < num_regs; n++) {
+ aqua_read_smn_ext(
+ adev, reg_data,
+ WAFL_LINK_REG(start_addr, j) +
+ n * incrx,
+ i);
+ ++reg_data;
+ }
+ }
+ p = reg_data;
+ }
+ }
+
+ wafl_reg_state->common_header.structure_size = szbuf;
+ wafl_reg_state->common_header.format_revision = 1;
+ wafl_reg_state->common_header.content_revision = 0;
+ wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
+ wafl_reg_state->common_header.num_instances = max_wafl_instances;
+
+ return wafl_reg_state->common_header.structure_size;
+}
+
+#define smnreg_0x1B311060 0x1B311060
+#define smnreg_0x1B411060 0x1B411060
+#define smnreg_0x1B511060 0x1B511060
+#define smnreg_0x1B611060 0x1B611060
+
+#define smnreg_0x1C307120 0x1C307120
+#define smnreg_0x1C317120 0x1C317120
+
+#define smnreg_0x1C320830 0x1C320830
+#define smnreg_0x1C380830 0x1C380830
+#define smnreg_0x1C3D0830 0x1C3D0830
+#define smnreg_0x1C420830 0x1C420830
+
+#define smnreg_0x1C320100 0x1C320100
+#define smnreg_0x1C380100 0x1C380100
+#define smnreg_0x1C3D0100 0x1C3D0100
+#define smnreg_0x1C420100 0x1C420100
+
+#define smnreg_0x1B310500 0x1B310500
+#define smnreg_0x1C300400 0x1C300400
+
+#define USR_CAKE_INCR 0x11000
+#define USR_LINK_INCR 0x100000
+#define USR_CP_INCR 0x10000
+
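+/* NUM_USR_SMN_REGS and NUM_USR1_SMN_REGS are the sums of the num_regs
+ * fields in the tables below
+ */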
+#define NUM_USR_SMN_REGS 20
+
+static struct aqua_reg_list usr_reg_addrs[] = {
+ { smnreg_0x1B311060, 4, DW_ADDR_INCR },
+ { smnreg_0x1B411060, 4, DW_ADDR_INCR },
+ { smnreg_0x1B511060, 4, DW_ADDR_INCR },
+ { smnreg_0x1B611060, 4, DW_ADDR_INCR },
+ { smnreg_0x1C307120, 2, DW_ADDR_INCR },
+ { smnreg_0x1C317120, 2, DW_ADDR_INCR },
+};
+
+#define NUM_USR1_SMN_REGS 46
+static struct aqua_reg_list usr1_reg_addrs[] = {
+ { smnreg_0x1C320830, 6, USR_CAKE_INCR },
+ { smnreg_0x1C380830, 5, USR_CAKE_INCR },
+ { smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
+ { smnreg_0x1C420830, 4, USR_CAKE_INCR },
+ { smnreg_0x1C320100, 6, USR_CAKE_INCR },
+ { smnreg_0x1C380100, 5, USR_CAKE_INCR },
+ { smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
+ { smnreg_0x1C420100, 4, USR_CAKE_INCR },
+ { smnreg_0x1B310500, 4, USR_LINK_INCR },
+ { smnreg_0x1C300400, 2, USR_CP_INCR },
+};
+
+static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
+ void *buf, size_t max_size,
+ int reg_state)
+{
+ uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
+ struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
+ struct amdgpu_regs_usr_v1_0 *usr_regs;
+ struct amdgpu_smn_reg_data *reg_data;
+ const int max_usr_instances = 4;
+ struct aqua_reg_list *reg_addrs;
+ int inst = 0, i, n, r, arr_size;
+ void *p;
+
+ if (!buf || !max_size)
+ return -EINVAL;
+
+ switch (reg_state) {
+ case AMDGPU_REG_STATE_TYPE_USR:
+ arr_size = ARRAY_SIZE(usr_reg_addrs);
+ reg_addrs = usr_reg_addrs;
+ num_smn = NUM_USR_SMN_REGS;
+ break;
+ case AMDGPU_REG_STATE_TYPE_USR_1:
+ arr_size = ARRAY_SIZE(usr1_reg_addrs);
+ reg_addrs = usr1_reg_addrs;
+ num_smn = NUM_USR1_SMN_REGS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;
+
+ szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
+ sizeof(*usr_regs),
+ num_smn);
+ if (max_size < szbuf)
+ return -EOVERFLOW;
+
+ p = &usr_reg_state->usr_state_regs[0];
+ for_each_inst(i, adev->aid_mask) {
+ usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
+ usr_regs->inst_header.instance = inst++;
+ usr_regs->inst_header.state = AMDGPU_INST_S_OK;
+ usr_regs->inst_header.num_smn_regs = num_smn;
+ reg_data = usr_regs->smn_reg_values;
+
+ for (r = 0; r < arr_size; r++) {
+ start_addr = reg_addrs[r].start_addr;
+ incrx = reg_addrs[r].incrx;
+ num_regs = reg_addrs[r].num_regs;
+ for (n = 0; n < num_regs; n++) {
+ aqua_read_smn_ext(adev, reg_data,
+ start_addr + n * incrx, i);
+ reg_data++;
+ }
+ }
+ p = reg_data;
+ }
+
+ usr_reg_state->common_header.structure_size = szbuf;
+ usr_reg_state->common_header.format_revision = 1;
+ usr_reg_state->common_header.content_revision = 0;
+ usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
+ usr_reg_state->common_header.num_instances = max_usr_instances;
+
+ return usr_reg_state->common_header.structure_size;
+}
+
ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
enum amdgpu_reg_state reg_state, void *buf,
size_t max_size)
@@ -861,6 +1053,17 @@ ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
case AMDGPU_REG_STATE_TYPE_XGMI:
size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
break;
+ case AMDGPU_REG_STATE_TYPE_WAFL:
+ size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
+ break;
+ case AMDGPU_REG_STATE_TYPE_USR:
+ size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
+ AMDGPU_REG_STATE_TYPE_USR);
+ break;
+ case AMDGPU_REG_STATE_TYPE_USR_1:
+ size = aqua_vanjaram_read_usr_state(
+ adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
index f0737fb3a999..d1bba9c64e16 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
@@ -30,6 +30,8 @@
#define regATHUB_MISC_CNTL_V3_0_1 0x00d7
#define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX 0
+#define regATHUB_MISC_CNTL_V3_3_0 0x00d8
+#define regATHUB_MISC_CNTL_V3_3_0_BASE_IDX 0
static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
@@ -40,6 +42,9 @@ static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
case IP_VERSION(3, 0, 1):
data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
break;
+ case IP_VERSION(3, 3, 0):
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0);
+ break;
default:
data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
break;
@@ -53,6 +58,9 @@ static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
case IP_VERSION(3, 0, 1):
WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
break;
+ case IP_VERSION(3, 3, 0):
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0, data);
+ break;
default:
WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 2c221000782c..a33e890c70d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -395,7 +395,6 @@ static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
(*ptr)++;
return;
}
- return;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 3ee219aa2891..7672abe6c140 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -28,6 +28,7 @@
#include <acpi/video.h>
+#include <drm/drm_edid.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_connectors.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index bb666cb7522e..587ee632a3b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -21,6 +21,7 @@
*
*/
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 7af277f61cca..f22ec27365bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -21,6 +21,7 @@
*
*/
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 143efc37a17f..4dbe9b3259b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -23,6 +23,7 @@
#include <linux/pci.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index adeddfb7ff12..05bcce23385e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -21,6 +21,7 @@
*
*/
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index c8a3bf01743f..d63cab294883 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3996,16 +3996,13 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
- /* don't check this. There are apparently firmwares in the wild with
- * incorrect size in the header
- */
- if (err == -ENODEV)
- goto out;
+ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
- dev_dbg(adev->dev,
- "gfx10: amdgpu_ucode_request() failed \"%s\"\n",
- fw_name);
+ goto out;
+
+ /* don't validate this firmware. There are apparently firmware
+ * images in the wild with an incorrect size in the header
+ */
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
@@ -6593,7 +6590,8 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
+ prop->allow_tunneling);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
mqd->cp_hqd_pq_control = tmp;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index c659ef0f47ce..0ea0866c261f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -115,7 +115,7 @@ static const struct soc15_reg_golden golden_settings_gc_11_5_0[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL, 0xffffffff, 0xf37fff3f),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xfffffffb, 0x00f40188),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x8000b007),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x80009007),
SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf1ffffff, 0x00880007),
SOC15_REG_GOLDEN_VALUE(GC, 0, regPC_CONFIG_CNTL_1, 0xffffffff, 0x00010000),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
@@ -3847,7 +3847,8 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
+ prop->allow_tunneling);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
mqd->cp_hqd_pq_control = tmp;
@@ -4473,11 +4474,43 @@ static int gfx_v11_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
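+/* Acquire (req != 0) or release (req == 0) the CP_GFX_INDEX_MUTEX hardware
+ * mutex, polling until the CP reflects the new state or the usec timeout
+ * expires.
+ */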
+static int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+ int req)
+{
+ u32 i, tmp, val;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ /* Request with MeId=2, PipeId=0 */
+ tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
+ tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
+ WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);
+
+ val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
+ if (req) {
+ if (val == tmp)
+ break;
+ } else {
+ tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
+ REQUEST, 1);
+
+ /* unlocked or locked by firmware */
+ if (val != tmp)
+ break;
+ }
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ return -EINVAL;
+
+ return 0;
+}
+
static int gfx_v11_0_soft_reset(void *handle)
{
u32 grbm_soft_reset = 0;
u32 tmp;
- int i, j, k;
+ int r, i, j, k;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
@@ -4517,6 +4550,13 @@ static int gfx_v11_0_soft_reset(void *handle)
}
}
+ /* Try to acquire the gfx mutex before access to CP_VMID_RESET */
+ r = gfx_v11_0_request_gfx_index_mutex(adev, 1);
+ if (r) {
+ DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n");
+ return r;
+ }
+
WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
/* Read the CP_VMID_RESET register three times. */
@@ -4525,6 +4565,13 @@ static int gfx_v11_0_soft_reset(void *handle)
RREG32_SOC15(GC, 0, regCP_VMID_RESET);
RREG32_SOC15(GC, 0, regCP_VMID_RESET);
+ /* release the gfx mutex */
+ r = gfx_v11_0_request_gfx_index_mutex(adev, 0);
+ if (r) {
+ DRM_ERROR("Failed to release the gfx mutex during soft reset\n");
+ return r;
+ }
+
for (i = 0; i < adev->usec_timeout; i++) {
if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
!RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
@@ -6336,6 +6383,9 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
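+ /* skip shader arrays that are not active */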
+ bitmap = i * adev->gfx.config.max_sh_per_se + j;
+ if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
+ continue;
mask = 1;
counter = 0;
gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 00b21ece081f..131cddbdda0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -3828,8 +3828,8 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
/* the caller should make sure initialize value of
* err_data->ue_count and err_data->ce_count
*/
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
}
static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 53a2ba5fcf4b..22175da0e16a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -102,7 +102,9 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+ AMD_APU_IS_RENOIR |
+ AMD_APU_IS_GREEN_SARDINE))
/*
* Raven2 has a HW issue that it is unable to use the
* vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index 55423ff1bb49..49aecdcee006 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -139,7 +139,9 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+ AMD_APU_IS_RENOIR |
+ AMD_APU_IS_GREEN_SARDINE))
/*
* Raven2 has a HW issue that it is unable to use the
* vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
@@ -454,10 +456,12 @@ static void gfxhub_v1_2_xcc_gart_disable(struct amdgpu_device *adev,
WREG32_SOC15_RLC(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL, tmp);
/* Setup L2 cache */
- tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
- WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
- WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
+ if (!amdgpu_sriov_vf(adev)) {
+ tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
+ WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index a5a05c16c10d..6c5185608854 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1041,6 +1041,10 @@ static int gmc_v10_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ if (adev->gmc.ecc_irq.funcs &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 23d7b548d13f..c9c653cfc765 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -941,6 +941,11 @@ static int gmc_v11_0_hw_fini(void *handle)
}
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+
+ if (adev->gmc.ecc_irq.funcs &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+
gmc_v11_0_gart_disable(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 2ac5820e9c92..f9039d64ff2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -883,7 +883,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* GRBM interface.
*/
if ((vmhub == AMDGPU_GFXHUB(0)) &&
- (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
+ (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
RREG32_NO_KIQ(req);
for (j = 0; j < adev->usec_timeout; j++) {
@@ -2380,6 +2380,10 @@ static int gmc_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ if (adev->gmc.ecc_irq.funcs &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index 49e934975719..4db6bb73ead4 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -129,6 +129,11 @@ static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
{
int data;
+ if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 2)) {
+ /* HDP MGCG is enabled by default on HDP 4.4.2 */
+ *flags |= AMD_CG_SUPPORT_HDP_MGCG;
+ return;
+ }
/* AMD_CG_SUPPORT_HDP_LS */
data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 9df011323d4b..6ede85b28cc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -155,13 +155,6 @@ static int jpeg_v4_0_5_hw_init(void *handle)
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
-
- WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
- ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
- VCN_JPEG_DB_CTRL__EN_MASK);
-
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
@@ -336,6 +329,14 @@ static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, true);
+ /* doorbell programming is done for every playback */
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
+
+ WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
+ ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+ VCN_JPEG_DB_CTRL__EN_MASK);
+
/* disable power gating */
r = jpeg_v4_0_5_disable_static_power_gating(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 4dfec56e1b7f..26d71a22395d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -408,6 +408,8 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
+ mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
+ mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 843219a91736..e3ddd22aa172 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -96,7 +96,9 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+ AMD_APU_IS_RENOIR |
+ AMD_APU_IS_GREEN_SARDINE))
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 9b0146732e13..fb53aacdcba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -652,8 +652,8 @@ static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev,
AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
&ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
}
static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 6d24c84924cb..19986ff6a48d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -401,8 +401,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
if (err_data.ce_count)
dev_info(adev->dev, "%ld correctable hardware "
- "errors detected in %s block, "
- "no user action is needed.\n",
+ "errors detected in %s block\n",
obj->err_data.ce_count,
get_ras_block_str(adev->nbio.ras_if));
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index 25a3da83e0fb..e90f33780803 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -597,8 +597,7 @@ static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device
if (err_data.ce_count)
dev_info(adev->dev, "%ld correctable hardware "
- "errors detected in %s block, "
- "no user action is needed.\n",
+ "errors detected in %s block\n",
obj->err_data.ce_count,
get_ras_block_str(adev->nbio.ras_if));
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 3cf4684d0d3f..df1844d0800f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -60,7 +60,7 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin");
#define GFX_CMD_USB_PD_USE_LFB 0x480
/* Retry times for vmbx ready wait */
-#define PSP_VMBX_POLLING_LIMIT 20000
+#define PSP_VMBX_POLLING_LIMIT 3000
/* VBIOS gfl defines */
#define MBOX_READY_MASK 0x80000000
@@ -161,14 +161,18 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- int retry_loop, ret;
+ int retry_loop, retry_cnt, ret;
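+
+ /* PSP 13.0.6 needs the long VMBX polling limit; other parts are ready
+ * within a few attempts
+ */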
+ retry_cnt =
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) ?
+ PSP_VMBX_POLLING_LIMIT :
+ 10;
/* Wait for the bootloader to signal that it is ready by setting bit 31 of
* C2PMSG_35 to 1. All other bits are expected to be cleared.
* If there is an error in processing the command, bits [7:0] will be set.
* This is applicable for PSP v13.0.6 and newer.
*/
- for (retry_loop = 0; retry_loop < PSP_VMBX_POLLING_LIMIT; retry_loop++) {
+ for (retry_loop = 0; retry_loop < retry_cnt; retry_loop++) {
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
0x80000000, 0xffffffff, false);
@@ -821,7 +825,7 @@ static int psp_v13_0_query_boot_status(struct psp_context *psp)
if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))
return 0;
- if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10007)
+ if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10109)
return 0;
for_each_inst(i, inst_mask) {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 45377a175250..8d5d86675a7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -813,12 +813,12 @@ static int sdma_v2_4_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
+ adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+
r = sdma_v2_4_init_microcode(adev);
if (r)
return r;
- adev->sdma.num_instances = SDMA_MAX_INSTANCE;
-
sdma_v2_4_set_ring_funcs(adev);
sdma_v2_4_set_buffer_funcs(adev);
sdma_v2_4_set_vm_pte_funcs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 0f24af6f2810..2d688dca26be 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -2156,7 +2156,7 @@ static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev,
AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
&ue_count);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
}
static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 83c240f741b5..0058f3f7cf6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1643,6 +1643,32 @@ static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
+static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* The SDMA 5.2.3 (RMB) FW doesn't seem to properly
+ * disallow GFXOFF in some cases, leading to
+ * hangs in SDMA. Disallow GFXOFF while SDMA is active.
+ * We could probably limit this to 5.2.3,
+ * but it shouldn't hurt on other parts since
+ * GFXOFF will be disallowed anyway when SDMA is
+ * active; this just makes it explicit.
+ */
+ amdgpu_gfx_off_ctrl(adev, false);
+}
+
+static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* The SDMA 5.2.3 (RMB) FW doesn't seem to properly
+ * disallow GFXOFF in some cases, leading to
+ * hangs in SDMA. Allow GFXOFF again once SDMA is idle.
+ */
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
.name = "sdma_v5_2",
.early_init = sdma_v5_2_early_init,
@@ -1690,6 +1716,8 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
.test_ib = sdma_v5_2_ring_test_ib,
.insert_nop = sdma_v5_2_ring_insert_nop,
.pad_ib = sdma_v5_2_ring_pad_ib,
+ .begin_use = sdma_v5_2_ring_begin_use,
+ .end_use = sdma_v5_2_ring_end_use,
.emit_wreg = sdma_v5_2_ring_emit_wreg,
.emit_reg_wait = sdma_v5_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 9ad4d6d3122b..15033efec2ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1424,11 +1424,14 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- adev->nbio.funcs->get_clockgating_state(adev, flags);
+ if (adev->nbio.funcs && adev->nbio.funcs->get_clockgating_state)
+ adev->nbio.funcs->get_clockgating_state(adev, flags);
- adev->hdp.funcs->get_clock_gating_state(adev, flags);
+ if (adev->hdp.funcs && adev->hdp.funcs->get_clock_gating_state)
+ adev->hdp.funcs->get_clock_gating_state(adev, flags);
- if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) {
+ if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) &&
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))) {
/* AMD_CG_SUPPORT_DRM_MGCG */
data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
if (!(data & 0x01000000))
@@ -1441,9 +1444,11 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
}
/* AMD_CG_SUPPORT_ROM_MGCG */
- adev->smuio.funcs->get_clock_gating_state(adev, flags);
+ if (adev->smuio.funcs && adev->smuio.funcs->get_clock_gating_state)
+ adev->smuio.funcs->get_clock_gating_state(adev, flags);
- adev->df.funcs->get_clockgating_state(adev, flags);
+ if (adev->df.funcs && adev->df.funcs->get_clockgating_state)
+ adev->df.funcs->get_clockgating_state(adev, flags);
}
static int soc15_common_set_powergating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index e9c2ff74f0bc..7458a218e89d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "umc/umc_12_0_0_offset.h"
#include "umc/umc_12_0_0_sh_mask.h"
+#include "mp/mp_13_0_6_sh_mask.h"
const uint32_t
umc_v12_0_channel_idx_tbl[]
@@ -88,16 +89,26 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
umc_v12_0_reset_error_count_per_channel, NULL);
}
-bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status)
+bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
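+ /* in poison mode, a valid deferred error is reported as uncorrectable */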
+ if (amdgpu_ras_is_poison_mode_supported(adev) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1))
+ return true;
+
return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
}
-bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status)
+bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
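+ /* in poison mode, a valid deferred error is not counted as correctable */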
+ if (amdgpu_ras_is_poison_mode_supported(adev) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1))
+ return false;
+
return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
@@ -105,7 +116,7 @@ bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status)
/* Identify data parity error in replay mode */
((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) &&
- !(umc_v12_0_is_uncorrectable_error(mc_umc_status)))));
+ !(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)))));
}
static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
@@ -124,7 +135,7 @@ static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
mc_umc_status =
RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
- if (umc_v12_0_is_correctable_error(mc_umc_status))
+ if (umc_v12_0_is_correctable_error(adev, mc_umc_status))
*error_count += 1;
}
@@ -142,7 +153,7 @@ static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev
mc_umc_status =
RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
- if (umc_v12_0_is_uncorrectable_error(mc_umc_status))
+ if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status))
*error_count += 1;
}
@@ -166,8 +177,8 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
umc_v12_0_query_correctable_error_count(adev, umc_reg_offset, &ce_count);
umc_v12_0_query_uncorrectable_error_count(adev, umc_reg_offset, &ue_count);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
return 0;
}
@@ -360,6 +371,59 @@ static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev,
return 0;
}
+static void umc_v12_0_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ amdgpu_mca_smu_log_ras_error(adev,
+ AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status);
+ amdgpu_mca_smu_log_ras_error(adev,
+ AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status);
+}
+
+static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_node *err_node;
+ uint64_t mc_umc_status;
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ for_each_ras_error(err_node, err_data) {
+ mc_umc_status = err_node->err_info.err_addr.err_status;
+ if (!mc_umc_status)
+ continue;
+
+ if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) {
+ uint64_t mca_addr, err_addr, mca_ipid;
+ uint32_t InstanceIdLo;
+ struct amdgpu_smuio_mcm_config_info *mcm_info;
+
+ mcm_info = &err_node->err_info.mcm_info;
+ mca_addr = err_node->err_info.err_addr.err_addr;
+ mca_ipid = err_node->err_info.err_addr.err_ipid;
+
+ err_addr = REG_GET_FIELD(mca_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+ InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo);
+
+ dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n",
+ mca_ipid,
+ mcm_info->die_id,
+ MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
+ MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
+ err_addr);
+
+ umc_v12_0_convert_error_address(adev,
+ err_data, err_addr,
+ MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
+ MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
+ mcm_info->die_id);
+
+ /* Clear umc error address content */
+ memset(&err_node->err_info.err_addr,
+ 0, sizeof(err_node->err_info.err_addr));
+ }
+ }
+}
+
static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev)
{
amdgpu_umc_loop_channels(adev,
@@ -386,4 +450,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
},
.err_cnt_init = umc_v12_0_err_cnt_init,
.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
+ .ecc_info_query_ras_error_count = umc_v12_0_ecc_info_query_ras_error_count,
+ .ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index b34b1e358f8b..e8de3a92251a 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -117,8 +117,12 @@
(pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \
} while (0)
-bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status);
-bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status);
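+/* decode the UMC channel and instance from the low dword of the MCA IPID:
+ * channel = (bit 20) * 4 + bits [15:12], instance = bits [23:21]
+ */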
+#define MCA_IPID_LO_2_UMC_CH(_ipid_lo) (((((_ipid_lo) >> 20) & 0x1) * 4) + \
+ (((_ipid_lo) >> 12) & 0xF))
+#define MCA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7)
+
+bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
+bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
extern const uint32_t
umc_v12_0_channel_idx_tbl[]
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index 530549314ce4..a3ee3c4c650f 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -64,7 +64,7 @@ static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev,
uint64_t reg_value;
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)
- dev_info(adev->dev, "Deferred error, no user action is needed.\n");
+ dev_info(adev->dev, "Deferred error\n");
if (mc_umc_status)
dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 48bfcd0d558b..169ed400ee7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -100,6 +100,31 @@ static int vcn_v4_0_early_init(void *handle)
return amdgpu_vcn_early_init(adev);
}
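+/* Set up the fw_shared area for one VCN instance; the SR-IOV start path
+ * re-runs this, so it must be safe to call more than once.
+ */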
+static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+
+ fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = 1;
+
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
+ fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
+ AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
+
+ if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
+ IP_VERSION(4, 0, 2)) {
+ fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
+ fw_shared->drm_key_wa.method =
+ AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
+ }
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
+
+ return 0;
+}
+
/**
* vcn_v4_0_sw_init - sw init for VCN block
*
@@ -124,8 +149,6 @@ static int vcn_v4_0_sw_init(void *handle)
return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
-
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -161,23 +184,7 @@ static int vcn_v4_0_sw_init(void *handle)
if (r)
return r;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
- fw_shared->sq.is_enabled = 1;
-
- fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
- fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
- AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
-
- if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
- IP_VERSION(4, 0, 2)) {
- fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
- fw_shared->drm_key_wa.method =
- AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
- }
-
- if (amdgpu_vcnfw_log)
- amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ vcn_v4_0_fw_shared_init(adev, i);
}
if (amdgpu_sriov_vf(adev)) {
@@ -1273,6 +1280,9 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
+ /* fw_shared must be (re-)initialized at the beginning */
+ vcn_v4_0_fw_shared_init(adev, i);
+
table_size = 0;
MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
index 174f13eff575..d20060a51e05 100644
--- a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
@@ -96,6 +96,10 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl;
amdgpu_vpe_psp_update_sram(adev);
+
+ /* Config DPM */
+ amdgpu_vpe_configure_dpm(vpe);
+
return 0;
}
@@ -128,6 +132,8 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
}
vpe_v6_1_halt(vpe, false);
+ /* Config DPM */
+ amdgpu_vpe_configure_dpm(vpe);
return 0;
}
@@ -264,6 +270,15 @@ static int vpe_v6_1_set_regs(struct amdgpu_vpe *vpe)
vpe->regs.queue0_rb_wptr_hi = regVPEC_QUEUE0_RB_WPTR_HI;
vpe->regs.queue0_preempt = regVPEC_QUEUE0_PREEMPT;
+ vpe->regs.dpm_enable = regVPEC_PUB_DUMMY2;
+ vpe->regs.dpm_pratio = regVPEC_QUEUE6_DUMMY4;
+ vpe->regs.dpm_request_interval = regVPEC_QUEUE5_DUMMY3;
+ vpe->regs.dpm_decision_threshold = regVPEC_QUEUE5_DUMMY4;
+ vpe->regs.dpm_busy_clamp_threshold = regVPEC_QUEUE7_DUMMY2;
+ vpe->regs.dpm_idle_clamp_threshold = regVPEC_QUEUE7_DUMMY3;
+ vpe->regs.dpm_request_lv = regVPEC_QUEUE7_DUMMY1;
+ vpe->regs.context_indicator = regVPEC_QUEUE6_DUMMY3;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index f6d4748c1980..ce4c52ec34d8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1564,16 +1564,11 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
{
struct kfd_ioctl_import_dmabuf_args *args = data;
struct kfd_process_device *pdd;
- struct dma_buf *dmabuf;
int idr_handle;
uint64_t size;
void *mem;
int r;
- dmabuf = dma_buf_get(args->dmabuf_fd);
- if (IS_ERR(dmabuf))
- return PTR_ERR(dmabuf);
-
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
if (!pdd) {
@@ -1587,10 +1582,10 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
goto err_unlock;
}
- r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf,
- args->va_addr, pdd->drm_priv,
- (struct kgd_mem **)&mem, &size,
- NULL);
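+ /* the helper takes the fd directly and manages the dma_buf reference itself */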
+ r = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(pdd->dev->adev, args->dmabuf_fd,
+ args->va_addr, pdd->drm_priv,
+ (struct kgd_mem **)&mem, &size,
+ NULL);
if (r)
goto err_unlock;
@@ -1601,7 +1596,6 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
}
mutex_unlock(&p->mutex);
- dma_buf_put(dmabuf);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
@@ -1612,7 +1606,6 @@ err_free:
pdd->drm_priv, NULL);
err_unlock:
mutex_unlock(&p->mutex);
- dma_buf_put(dmabuf);
return r;
}
@@ -1855,8 +1848,8 @@ static uint32_t get_process_num_bos(struct kfd_process *p)
return num_of_bos;
}
-static int criu_get_prime_handle(struct kgd_mem *mem, int flags,
- u32 *shared_fd)
+static int criu_get_prime_handle(struct kgd_mem *mem,
+ int flags, u32 *shared_fd)
{
struct dma_buf *dmabuf;
int ret;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 62b205dac63a..6604a3f99c5e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -330,12 +330,6 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
pdd->gpuvm_limit =
pdd->dev->kfd->shared_resources.gpuvm_size - 1;
- /* dGPUs: the reserved space for kernel
- * before SVM
- */
- pdd->qpd.cwsr_base = SVM_CWSR_BASE;
- pdd->qpd.ib_base = SVM_IB_BASE;
-
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
}
@@ -345,18 +339,18 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
pdd->lds_base = MAKE_LDS_APP_BASE_V9();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
- pdd->gpuvm_base = PAGE_SIZE;
+ /* Raven needs SVM to support graphics handles, etc. Leave the small
+ * reserved space before SVM on Raven as well, even though we don't
+ * have to.
+ * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
+ * are used in Thunk to reserve SVM.
+ */
+ pdd->gpuvm_base = SVM_USER_BASE;
pdd->gpuvm_limit =
pdd->dev->kfd->shared_resources.gpuvm_size - 1;
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
-
- /*
- * Place TBA/TMA on opposite side of VM hole to prevent
- * stray faults from triggering SVM on these pages.
- */
- pdd->qpd.cwsr_base = pdd->dev->kfd->shared_resources.gpuvm_size;
}
int kfd_init_apertures(struct kfd_process *process)
@@ -413,6 +407,12 @@ int kfd_init_apertures(struct kfd_process *process)
return -EINVAL;
}
}
+
+ /* dGPUs: the reserved space for the kernel
+ * before SVM
+ */
+ pdd->qpd.cwsr_base = SVM_CWSR_BASE;
+ pdd->qpd.ib_base = SVM_IB_BASE;
}
dev_dbg(kfd_device, "node id %u\n", id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 6c25dab051d5..f856901055d3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -260,19 +260,6 @@ static void svm_migrate_put_sys_page(unsigned long addr)
put_page(page);
}
-static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
-{
- unsigned long cpages = 0;
- unsigned long i;
-
- for (i = 0; i < migrate->npages; i++) {
- if (migrate->src[i] & MIGRATE_PFN_VALID &&
- migrate->src[i] & MIGRATE_PFN_MIGRATE)
- cpages++;
- }
- return cpages;
-}
-
static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
unsigned long upages = 0;
@@ -402,6 +389,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
struct dma_fence *mfence = NULL;
struct migrate_vma migrate = { 0 };
unsigned long cpages = 0;
+ unsigned long mpages = 0;
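+ /* cpages: pages collected for migration; mpages: pages actually migrated */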
dma_addr_t *scratch;
void *buf;
int r = -ENOMEM;
@@ -442,20 +430,21 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
goto out_free;
}
if (cpages != npages)
- pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+ pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
cpages, npages);
else
- pr_debug("0x%lx pages migrated\n", cpages);
+ pr_debug("0x%lx pages collected\n", cpages);
r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
- pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- svm_migrate_successful_pages(&migrate), cpages, migrate.npages);
-
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
+ mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
+ pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
+
kfd_smi_event_migration_end(node, p->lead_thread->pid,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
0, node->id, trigger);
@@ -465,12 +454,12 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
out_free:
kvfree(buf);
out:
- if (!r && cpages) {
+ if (!r && mpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
- WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);
+ WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);
- return cpages;
+ return mpages;
}
return r;
}
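The accounting change above is subtle: cpages only counts pages that migrate_vma_setup() managed to collect, not pages that finished the copy. The retained helper presumably mirrors the deleted one with the test inverted — a minimal sketch, assuming the usual MIGRATE_PFN_* flag semantics from <linux/migrate.h> (VALID set at collection time, MIGRATE still set after migrate_vma_pages() only on success):

#include <linux/migrate.h>

/* Pages that were collected but did not complete migration. */
static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
	unsigned long upages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if ((migrate->src[i] & MIGRATE_PFN_VALID) &&
		    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			upages++;
	}
	return upages;
}

so that mpages = cpages - svm_migrate_unsuccessful_pages(&migrate) counts exactly the pages that were both collected and migrated.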
@@ -479,6 +468,8 @@ out:
* svm_migrate_ram_to_vram - migrate svm range from system to device
* @prange: range structure
* @best_loc: the device to migrate to
+ * @start_mgr: start page to migrate
+ * @last_mgr: last page to migrate
* @mm: the process mm structure
* @trigger: reason of migration
*
@@ -489,19 +480,20 @@ out:
*/
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
+ unsigned long start_mgr, unsigned long last_mgr,
struct mm_struct *mm, uint32_t trigger)
{
unsigned long addr, start, end;
struct vm_area_struct *vma;
uint64_t ttm_res_offset;
struct kfd_node *node;
- unsigned long cpages = 0;
+ unsigned long mpages = 0;
long r = 0;
- if (prange->actual_loc == best_loc) {
- pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
- prange->svms, prange->start, prange->last, best_loc);
- return 0;
+ if (start_mgr < prange->start || last_mgr > prange->last) {
+ pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+ start_mgr, last_mgr, prange->start, prange->last);
+ return -EFAULT;
}
node = svm_range_get_node_by_id(prange, best_loc);
@@ -510,18 +502,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
return -ENODEV;
}
- pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
- prange->start, prange->last, best_loc);
+ pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n",
+ prange->svms, start_mgr, last_mgr, prange->start, prange->last,
+ best_loc);
- start = prange->start << PAGE_SHIFT;
- end = (prange->last + 1) << PAGE_SHIFT;
+ start = start_mgr << PAGE_SHIFT;
+ end = (last_mgr + 1) << PAGE_SHIFT;
r = svm_range_vram_node_new(node, prange, true);
if (r) {
dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
return r;
}
- ttm_res_offset = prange->offset << PAGE_SHIFT;
+ ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;
for (addr = start; addr < end;) {
unsigned long next;
@@ -536,16 +529,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("failed %ld to migrate\n", r);
break;
} else {
- cpages += r;
+ mpages += r;
}
ttm_res_offset += next - addr;
addr = next;
}
- if (cpages) {
+ if (mpages) {
prange->actual_loc = best_loc;
- svm_range_dma_unmap(prange);
- } else {
+ prange->vram_pages += mpages;
+ } else if (!prange->actual_loc) {
+ /* if no pages were migrated and all pages in prange are in
+ * sys ram, drop the svm_bo obtained from svm_range_vram_node_new
+ */
svm_range_vram_node_free(prange);
}
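Since svm_migrate_ram_to_vram() can now operate on a sub-range, the VRAM destination offset has to be rebased onto the sub-range rather than the prange start. A hypothetical standalone helper showing the arithmetic the hunk above performs inline (all page-granular inputs, as in the code):

/* For a prange spanning [prange_start, prange_last] whose VRAM
 * allocation begins prange_offset pages into its mm_nodes, compute the
 * byte window and TTM resource offset for migrating the sub-range
 * [start_mgr, last_mgr]. E.g. prange [0x100, 0x1ff] with offset 0x10,
 * migrating [0x140, 0x17f], yields a ttm_res_offset of 0x50 pages.
 */
static void sub_range_window(unsigned long start_mgr, unsigned long last_mgr,
			     unsigned long prange_start, u64 prange_offset,
			     u64 *start, u64 *end, u64 *ttm_res_offset)
{
	*start = (u64)start_mgr << PAGE_SHIFT;
	*end = ((u64)last_mgr + 1) << PAGE_SHIFT;
	*ttm_res_offset = (start_mgr - prange_start + prange_offset)
			  << PAGE_SHIFT;
}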
@@ -663,9 +659,8 @@ out_oom:
* Context: Process context, caller hold mmap read lock, prange->migrate_mutex
*
* Return:
- * 0 - success with all pages migrated
* negative values - indicate error
- * positive values - partial migration, number of pages not migrated
+ * positive values or zero - number of pages migrated
*/
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
@@ -676,6 +671,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
uint64_t npages = (end - start) >> PAGE_SHIFT;
unsigned long upages = npages;
unsigned long cpages = 0;
+ unsigned long mpages = 0;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
@@ -725,10 +721,10 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
goto out_free;
}
if (cpages != npages)
- pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+ pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
cpages, npages);
else
- pr_debug("0x%lx pages migrated\n", cpages);
+ pr_debug("0x%lx pages collected\n", cpages);
r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
scratch, npages);
@@ -751,17 +747,21 @@ out_free:
kvfree(buf);
out:
if (!r && cpages) {
+ mpages = cpages - upages;
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
- WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
+ WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
}
- return r ? r : upages;
+
+ return r ? r : mpages;
}
/**
* svm_migrate_vram_to_ram - migrate svm range from device to system
* @prange: range structure
* @mm: process mm, use current->mm if NULL
+ * @start_mgr: first page to be migrated to sys ram
+ * @last_mgr: last page to be migrated to sys ram
* @trigger: reason of migration
* @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback
*
@@ -771,6 +771,7 @@ out:
* 0 - OK, otherwise error code
*/
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
+ unsigned long start_mgr, unsigned long last_mgr,
uint32_t trigger, struct page *fault_page)
{
struct kfd_node *node;
@@ -778,26 +779,33 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
unsigned long addr;
unsigned long start;
unsigned long end;
- unsigned long upages = 0;
+ unsigned long mpages = 0;
long r = 0;
+ /* this prange has no vram pages to migrate to sys ram */
if (!prange->actual_loc) {
pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
prange->start, prange->last);
return 0;
}
+ if (start_mgr < prange->start || last_mgr > prange->last) {
+ pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+ start_mgr, last_mgr, prange->start, prange->last);
+ return -EFAULT;
+ }
+
node = svm_range_get_node_by_id(prange, prange->actual_loc);
if (!node) {
pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
return -ENODEV;
}
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
- prange->svms, prange, prange->start, prange->last,
+ prange->svms, prange, start_mgr, last_mgr,
prange->actual_loc);
- start = prange->start << PAGE_SHIFT;
- end = (prange->last + 1) << PAGE_SHIFT;
+ start = start_mgr << PAGE_SHIFT;
+ end = (last_mgr + 1) << PAGE_SHIFT;
for (addr = start; addr < end;) {
unsigned long next;
@@ -816,14 +824,21 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
pr_debug("failed %ld to migrate prange %p\n", r, prange);
break;
} else {
- upages += r;
+ mpages += r;
}
addr = next;
}
- if (r >= 0 && !upages) {
- svm_range_vram_node_free(prange);
- prange->actual_loc = 0;
+ if (r >= 0) {
+ prange->vram_pages -= mpages;
+
+ /* if prange no longer holds any vram pages, set its actual_loc
+ * to system ram and drop its svm_bo ref
+ */
+ if (prange->vram_pages == 0 && prange->ttm_res) {
+ prange->actual_loc = 0;
+ svm_range_vram_node_free(prange);
+ }
}
return r < 0 ? r : 0;
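With vram_pages acting as a population count, partial migrations in either direction keep the svm_bo alive until the last VRAM page is gone. A hedged walkthrough with hypothetical numbers (a 0x200-page prange, fully VRAM-resident, granularity windows of 0x40 pages):

prange->vram_pages = 0x200;		/* actual_loc = gpu id, ttm_res held */

/* 1) CPU fault migrates one granularity window to sys ram */
prange->vram_pages -= 0x40;		/* 0x1c0: svm_bo stays alive */

/* 2) prefetch migrates that window back to VRAM */
prange->vram_pages += 0x40;		/* 0x200 */

/* 3) eviction migrates the whole range to sys ram */
prange->vram_pages -= 0x200;		/* 0: last VRAM page gone, so... */
prange->actual_loc = 0;			/* ...mark the range system-resident */
svm_range_vram_node_free(prange);	/* and drop the backing bo */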
@@ -833,17 +848,23 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
* svm_migrate_vram_to_vram - migrate svm range from device to device
* @prange: range structure
* @best_loc: the device to migrate to
+ * @start: first page to migrate to gpu node best_loc
+ * @last: last page to migrate to gpu node best_loc
* @mm: process mm, use current->mm if NULL
* @trigger: reason of migration
*
* Context: Process context, caller hold mmap read lock, svms lock, prange lock
*
+ * migrate all vram pages in prange to sys ram, then migrate
+ * [start, last] pages from sys ram to gpu node best_loc.
+ *
* Return:
* 0 - OK, otherwise error code
*/
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
- struct mm_struct *mm, uint32_t trigger)
+ unsigned long start, unsigned long last,
+ struct mm_struct *mm, uint32_t trigger)
{
int r, retries = 3;
@@ -855,7 +876,8 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
do {
- r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
+ r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
+ trigger, NULL);
if (r)
return r;
} while (prange->actual_loc && --retries);
@@ -863,17 +885,21 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
if (prange->actual_loc)
return -EDEADLK;
- return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
+ return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
}
int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
+ unsigned long start, unsigned long last,
struct mm_struct *mm, uint32_t trigger)
{
- if (!prange->actual_loc)
- return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
+ if (!prange->actual_loc || prange->actual_loc == best_loc)
+ return svm_migrate_ram_to_vram(prange, best_loc, start, last,
+ mm, trigger);
+
else
- return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
+ return svm_migrate_vram_to_vram(prange, best_loc, start, last,
+ mm, trigger);
}
@@ -889,10 +915,9 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
*/
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
+ unsigned long start, last, size;
unsigned long addr = vmf->address;
struct svm_range_bo *svm_bo;
- enum svm_work_list_ops op;
- struct svm_range *parent;
struct svm_range *prange;
struct kfd_process *p;
struct mm_struct *mm;
@@ -929,51 +954,31 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
mutex_lock(&p->svms.lock);
- prange = svm_range_from_addr(&p->svms, addr, &parent);
+ prange = svm_range_from_addr(&p->svms, addr, NULL);
if (!prange) {
pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
r = -EFAULT;
goto out_unlock_svms;
}
- mutex_lock(&parent->migrate_mutex);
- if (prange != parent)
- mutex_lock_nested(&prange->migrate_mutex, 1);
+ mutex_lock(&prange->migrate_mutex);
if (!prange->actual_loc)
goto out_unlock_prange;
- svm_range_lock(parent);
- if (prange != parent)
- mutex_lock_nested(&prange->lock, 1);
- r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
- if (prange != parent)
- mutex_unlock(&prange->lock);
- svm_range_unlock(parent);
- if (r) {
- pr_debug("failed %d to split range by granularity\n", r);
- goto out_unlock_prange;
- }
+ /* Align migration range start and size to granularity size */
+ size = 1UL << prange->granularity;
+ start = max(ALIGN_DOWN(addr, size), prange->start);
+ last = min(ALIGN(addr + 1, size) - 1, prange->last);
- r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
- vmf->page);
+ r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
if (r)
pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
- r, prange->svms, prange, prange->start, prange->last);
-
- /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
- if (p->xnack_enabled && parent == prange)
- op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
- else
- op = SVM_OP_UPDATE_RANGE_NOTIFIER;
- svm_range_add_list_work(&p->svms, parent, mm, op);
- schedule_deferred_list_work(&p->svms);
+ r, prange->svms, prange, start, last);
out_unlock_prange:
- if (prange != parent)
- mutex_unlock(&prange->migrate_mutex);
- mutex_unlock(&parent->migrate_mutex);
+ mutex_unlock(&prange->migrate_mutex);
out_unlock_svms:
mutex_unlock(&p->svms.lock);
out_unref_process:
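Replacing the split with a computed window is the heart of this hunk: the fault handler no longer mutates the range tree, it just clamps a granularity-aligned window around the faulting address. A standalone sketch of the computation (hypothetical helper; addr and the range bounds are in pages):

#include <linux/align.h>
#include <linux/minmax.h>

/* Granularity-aligned migration window around @addr, clamped to the
 * prange. E.g. granularity 9 (512 pages), addr 0x1234 and range
 * [0x1000, 0x13ff] give the window [0x1200, 0x13ff].
 */
static void fault_window(unsigned long addr, unsigned int granularity,
			 unsigned long range_start, unsigned long range_last,
			 unsigned long *start, unsigned long *last)
{
	unsigned long size = 1UL << granularity;

	*start = max(ALIGN_DOWN(addr, size), range_start);
	*last = min(ALIGN(addr + 1, size) - 1, range_last);
}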
@@ -1021,7 +1026,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
} else {
res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
if (IS_ERR(res))
- return -ENOMEM;
+ return PTR_ERR(res);
pgmap->range.start = res->start;
pgmap->range.end = res->end;
pgmap->type = MEMORY_DEVICE_PRIVATE;
@@ -1037,10 +1042,10 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
r = devm_memremap_pages(adev->dev, pgmap);
if (IS_ERR(r)) {
pr_err("failed to register HMM device memory\n");
- /* Disable SVM support capability */
- pgmap->type = 0;
if (pgmap->type == MEMORY_DEVICE_PRIVATE)
devm_release_mem_region(adev->dev, res->start, resource_size(res));
+ /* Disable SVM support capability */
+ pgmap->type = 0;
return PTR_ERR(r);
}
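The hunk above fixes a use-after-clear in the error path: pgmap->type was zeroed before being tested, so the MEMORY_DEVICE_PRIVATE region could never be released. Reduced to its essentials:

/* Buggy order (before): the test can never be true.
 *
 *	pgmap->type = 0;
 *	if (pgmap->type == MEMORY_DEVICE_PRIVATE)
 *		devm_release_mem_region(adev->dev, res->start,
 *					resource_size(res));
 */

/* Fixed order: release while pgmap->type still describes the mapping,
 * then clear it to disable the SVM capability.
 */
if (pgmap->type == MEMORY_DEVICE_PRIVATE)
	devm_release_mem_region(adev->dev, res->start, resource_size(res));
pgmap->type = 0;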
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index 487f26368164..2eebf67f9c2c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -41,9 +41,13 @@ enum MIGRATION_COPY_DIR {
};
int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
+ unsigned long start, unsigned long last,
struct mm_struct *mm, uint32_t trigger);
+
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
+ unsigned long start, unsigned long last,
uint32_t trigger, struct page *fault_page);
+
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 45366b4ca976..17fbedbf3651 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -917,7 +917,7 @@ struct kfd_process {
* fence will be triggered during eviction and new one will be created
* during restore
*/
- struct dma_fence *ef;
+ struct dma_fence __rcu *ef;
/* Work items for evicting and restoring BOs */
struct delayed_work eviction_work;
@@ -970,7 +970,7 @@ struct kfd_process {
struct work_struct debug_event_workarea;
/* Tracks debug per-vmid request for debug flags */
- bool dbg_flags;
+ u32 dbg_flags;
atomic_t poison;
/* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 71df51fcc1b0..717a60d7a4ea 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1110,6 +1110,7 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
+ struct dma_fence *ef;
kfd_process_dequeue_from_all_devices(p);
pqm_uninit(&p->pqm);
@@ -1118,7 +1119,9 @@ static void kfd_process_wq_release(struct work_struct *work)
* destroyed. This allows any BOs to be freed without
* triggering pointless evictions or waiting for fences.
*/
- dma_fence_signal(p->ef);
+ synchronize_rcu();
+ ef = rcu_access_pointer(p->ef);
+ dma_fence_signal(ef);
kfd_process_remove_sysfs(p);
@@ -1127,7 +1130,7 @@ static void kfd_process_wq_release(struct work_struct *work)
svm_range_list_fini(p);
kfd_process_destroy_pdds(p);
- dma_fence_put(p->ef);
+ dma_fence_put(ef);
kfd_event_free_process(p);
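Together with the __rcu annotation in the kfd_priv.h hunk above, this makes p->ef an RCU-protected pointer: readers dereference it inside an RCU read-side section while the release path quiesces them with synchronize_rcu() before signaling and dropping the fence. A minimal sketch of the reader side, assuming updaters publish with rcu_assign_pointer()/rcu_replace_pointer():

#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

struct proc_like {
	struct dma_fence __rcu *ef;	/* eviction fence, as in kfd_process */
};

/* Take a counted reference on the fence, or return NULL if it is gone;
 * dma_fence_get_rcu_safe() retries if the pointer is swapped under us.
 */
static struct dma_fence *get_eviction_fence(struct proc_like *p)
{
	struct dma_fence *ef;

	rcu_read_lock();
	ef = dma_fence_get_rcu_safe(&p->ef);
	rcu_read_unlock();
	return ef;
}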
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 77f493262e05..43eff221eae5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -87,6 +87,8 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
return;
dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
+ if (dev->kfd->shared_resources.enable_mes)
+ amdgpu_mes_flush_shader_debugger(dev->adev, pdd->proc_ctx_gpu_addr);
pdd->already_dequeued = true;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 613f19510287..c50a0dc9c9c0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -198,6 +198,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
addr[i] >> PAGE_SHIFT, page_to_pfn(page));
}
+
return 0;
}
@@ -349,6 +350,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
INIT_LIST_HEAD(&prange->child_list);
atomic_set(&prange->invalid, 0);
prange->validate_timestamp = 0;
+ prange->vram_pages = 0;
mutex_init(&prange->migrate_mutex);
mutex_init(&prange->lock);
@@ -395,19 +397,16 @@ static void svm_range_bo_release(struct kref *kref)
prange->start, prange->last);
mutex_lock(&prange->lock);
prange->svm_bo = NULL;
+ /* prange should not hold any vram pages at this point */
+ WARN_ONCE(prange->actual_loc, "prange should not hold vram pages");
mutex_unlock(&prange->lock);
spin_lock(&svm_bo->list_lock);
}
spin_unlock(&svm_bo->list_lock);
- if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
- /* We're not in the eviction worker.
- * Signal the fence and synchronize with any
- * pending eviction work.
- */
+ if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
+ /* We're not in the eviction worker. Signal the fence. */
dma_fence_signal(&svm_bo->eviction_fence->base);
- cancel_work_sync(&svm_bo->eviction_work);
- }
dma_fence_put(&svm_bo->eviction_fence->base);
amdgpu_bo_unref(&svm_bo->bo);
kfree(svm_bo);
@@ -878,14 +877,29 @@ static void svm_range_debug_dump(struct svm_range_list *svms)
static void *
svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
- uint64_t offset)
+ uint64_t offset, uint64_t *vram_pages)
{
+ unsigned char *src = (unsigned char *)psrc + offset;
unsigned char *dst;
+ uint64_t i;
dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
if (!dst)
return NULL;
- memcpy(dst, (unsigned char *)psrc + offset, num_elements * size);
+
+ if (!vram_pages) {
+ memcpy(dst, src, num_elements * size);
+ return (void *)dst;
+ }
+
+ *vram_pages = 0;
+ for (i = 0; i < num_elements; i++) {
+ dma_addr_t *temp;
+ temp = (dma_addr_t *)dst + i;
+ *temp = *((dma_addr_t *)src + i);
+ if (*temp & SVM_RANGE_VRAM_DOMAIN)
+ (*vram_pages)++;
+ }
return (void *)dst;
}
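svm_range_copy_array() now doubles as a counter: with a non-NULL @vram_pages it copies element by element and tallies entries whose dma address carries the VRAM-domain flag. Isolated, the counting loop amounts to this sketch (SVM_RANGE_VRAM_DOMAIN being the flag bit the driver keeps in dma_addr entries backed by VRAM):

/* Count dma_addr entries flagged as VRAM-backed. */
static u64 count_vram_entries(const dma_addr_t *addrs, u64 num_elements)
{
	u64 i, vram_pages = 0;

	for (i = 0; i < num_elements; i++)
		if (addrs[i] & SVM_RANGE_VRAM_DOMAIN)
			vram_pages++;
	return vram_pages;
}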
@@ -899,7 +913,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
if (!src->dma_addr[i])
continue;
dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
- sizeof(*src->dma_addr[i]), src->npages, 0);
+ sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
if (!dst->dma_addr[i])
return -ENOMEM;
}
@@ -910,7 +924,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
uint64_t old_start, uint64_t old_n,
- uint64_t new_start, uint64_t new_n)
+ uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
{
unsigned char *new, *old, *pold;
uint64_t d;
@@ -922,11 +936,12 @@ svm_range_split_array(void *ppnew, void *ppold, size_t size,
return 0;
d = (new_start - old_start) * size;
- new = svm_range_copy_array(pold, size, new_n, d);
+ /* get dma addr array for the new range and count its vram pages */
+ new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
if (!new)
return -ENOMEM;
d = (new_start == old_start) ? new_n * size : 0;
- old = svm_range_copy_array(pold, size, old_n, d);
+ old = svm_range_copy_array(pold, size, old_n, d, NULL);
if (!old) {
kvfree(new);
return -ENOMEM;
@@ -948,10 +963,13 @@ svm_range_split_pages(struct svm_range *new, struct svm_range *old,
for (i = 0; i < MAX_GPU_INSTANCE; i++) {
r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
sizeof(*old->dma_addr[i]), old->start,
- npages, new->start, new->npages);
+ npages, new->start, new->npages,
+ old->actual_loc ? &new->vram_pages : NULL);
if (r)
return r;
}
+ if (old->actual_loc)
+ old->vram_pages -= new->vram_pages;
return 0;
}
@@ -1097,7 +1115,7 @@ static int
svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
struct list_head *insert_list, struct list_head *remap_list)
{
- struct svm_range *tail;
+ struct svm_range *tail = NULL;
int r = svm_range_split(prange, prange->start, new_last, &tail);
if (!r) {
@@ -1112,7 +1130,7 @@ static int
svm_range_split_head(struct svm_range *prange, uint64_t new_start,
struct list_head *insert_list, struct list_head *remap_list)
{
- struct svm_range *head;
+ struct svm_range *head = NULL;
int r = svm_range_split(prange, new_start, prange->last, &head);
if (!r) {
@@ -1135,66 +1153,6 @@ svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
list_add_tail(&pchild->child_list, &prange->child_list);
}
-/**
- * svm_range_split_by_granularity - collect ranges within granularity boundary
- *
- * @p: the process with svms list
- * @mm: mm structure
- * @addr: the vm fault address in pages, to split the prange
- * @parent: parent range if prange is from child list
- * @prange: prange to split
- *
- * Trims @prange to be a single aligned block of prange->granularity if
- * possible. The head and tail are added to the child_list in @parent.
- *
- * Context: caller must hold mmap_read_lock and prange->lock
- *
- * Return:
- * 0 - OK, otherwise error code
- */
-int
-svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
- unsigned long addr, struct svm_range *parent,
- struct svm_range *prange)
-{
- struct svm_range *head, *tail;
- unsigned long start, last, size;
- int r;
-
- /* Align splited range start and size to granularity size, then a single
- * PTE will be used for whole range, this reduces the number of PTE
- * updated and the L1 TLB space used for translation.
- */
- size = 1UL << prange->granularity;
- start = ALIGN_DOWN(addr, size);
- last = ALIGN(addr + 1, size) - 1;
-
- pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
- prange->svms, prange->start, prange->last, start, last, size);
-
- if (start > prange->start) {
- r = svm_range_split(prange, start, prange->last, &head);
- if (r)
- return r;
- svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
- }
-
- if (last < prange->last) {
- r = svm_range_split(prange, prange->start, last, &tail);
- if (r)
- return r;
- svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
- }
-
- /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
- if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
- prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
- pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
- prange, prange->start, prange->last,
- SVM_OP_ADD_RANGE_AND_MAP);
- }
- return 0;
-}
static bool
svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
{
@@ -1529,7 +1487,7 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
uint32_t gpuidx;
int r;
- drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0);
+ drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0, 0);
drm_exec_until_all_locked(&ctx->exec) {
for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
@@ -1614,6 +1572,7 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
* 5. Release page table (and SVM BO) reservation
*/
static int svm_range_validate_and_map(struct mm_struct *mm,
+ unsigned long map_start, unsigned long map_last,
struct svm_range *prange, int32_t gpuidx,
bool intr, bool wait, bool flush_tlb)
{
@@ -1653,18 +1612,24 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
if (test_bit(gpuidx, prange->bitmap_access))
bitmap_set(ctx->bitmap, gpuidx, 1);
}
+
+ /*
+ * If prange is already mapped or has the always-mapped flag set,
+ * update the mapping on GPUs with the ACCESS attribute
+ */
+ if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
+ if (prange->mapped_to_gpu ||
+ prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
+ bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
+ }
} else {
bitmap_or(ctx->bitmap, prange->bitmap_access,
prange->bitmap_aip, MAX_GPU_INSTANCE);
}
if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
- bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
- if (!prange->mapped_to_gpu ||
- bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
- r = 0;
- goto free_ctx;
- }
+ r = 0;
+ goto free_ctx;
}
if (prange->actual_loc && !prange->ttm_res) {
@@ -1688,10 +1653,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
}
- start = prange->start << PAGE_SHIFT;
- end = (prange->last + 1) << PAGE_SHIFT;
+ start = map_start << PAGE_SHIFT;
+ end = (map_last + 1) << PAGE_SHIFT;
for (addr = start; !r && addr < end; ) {
struct hmm_range *hmm_range;
+ unsigned long map_start_vma;
+ unsigned long map_last_vma;
struct vm_area_struct *vma;
unsigned long next = 0;
unsigned long offset;
@@ -1719,7 +1686,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
if (!r) {
- offset = (addr - start) >> PAGE_SHIFT;
+ offset = (addr >> PAGE_SHIFT) - prange->start;
r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
hmm_range->hmm_pfns);
if (r)
@@ -1737,9 +1704,16 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
r = -EAGAIN;
}
- if (!r)
- r = svm_range_map_to_gpus(prange, offset, npages, readonly,
- ctx->bitmap, wait, flush_tlb);
+ if (!r) {
+ map_start_vma = max(map_start, prange->start + offset);
+ map_last_vma = min(map_last, prange->start + offset + npages - 1);
+ if (map_start_vma <= map_last_vma) {
+ offset = map_start_vma - prange->start;
+ npages = map_last_vma - map_start_vma + 1;
+ r = svm_range_map_to_gpus(prange, offset, npages, readonly,
+ ctx->bitmap, wait, flush_tlb);
+ }
+ }
if (!r && next == end)
prange->mapped_to_gpu = true;
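Because the HMM walk above still proceeds VMA chunk by VMA chunk while only [map_start, map_last] needs mapping, each chunk is intersected with the requested window before svm_range_map_to_gpus() is called. The intersection in isolation (a hypothetical helper, all values in pages):

#include <linux/minmax.h>

/* Intersect a VMA chunk [chunk_start, chunk_start + npages - 1] with
 * the mapping window [map_start, map_last]; returns false when they do
 * not overlap, i.e. nothing in this chunk needs mapping.
 */
static bool clamp_map_window(unsigned long chunk_start, unsigned long npages,
			     unsigned long map_start, unsigned long map_last,
			     unsigned long *out_start, unsigned long *out_npages)
{
	unsigned long first = max(map_start, chunk_start);
	unsigned long last = min(map_last, chunk_start + npages - 1);

	if (first > last)
		return false;
	*out_start = first;
	*out_npages = last - first + 1;
	return true;
}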
@@ -1832,8 +1806,8 @@ static void svm_range_restore_work(struct work_struct *work)
*/
mutex_lock(&prange->migrate_mutex);
- r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
- false, true, false);
+ r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+ MAX_GPU_INSTANCE, false, true, false);
if (r)
pr_debug("failed %d to map 0x%lx to gpus\n", r,
prange->start);
@@ -2001,6 +1975,7 @@ static struct svm_range *svm_range_clone(struct svm_range *old)
new->actual_loc = old->actual_loc;
new->granularity = old->granularity;
new->mapped_to_gpu = old->mapped_to_gpu;
+ new->vram_pages = old->vram_pages;
bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
@@ -2365,8 +2340,10 @@ retry:
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
- /* Pairs with mmget in svm_range_add_list_work */
- mmput(mm);
+ /* Pairs with mmget in svm_range_add_list_work. If this drops the
+ * last mm refcount, schedule the release work to avoid circular locking
+ */
+ mmput_async(mm);
spin_lock(&svms->deferred_list_lock);
}
@@ -2677,6 +2654,7 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
{
struct vm_area_struct *vma;
struct interval_tree_node *node;
+ struct rb_node *rb_node;
unsigned long start_limit, end_limit;
vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
@@ -2696,16 +2674,15 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
if (node) {
end_limit = min(end_limit, node->start);
/* Last range that ends before the fault address */
- node = container_of(rb_prev(&node->rb),
- struct interval_tree_node, rb);
+ rb_node = rb_prev(&node->rb);
} else {
/* Last range must end before addr because
* there was no range after addr
*/
- node = container_of(rb_last(&p->svms.objects.rb_root),
- struct interval_tree_node, rb);
+ rb_node = rb_last(&p->svms.objects.rb_root);
}
- if (node) {
+ if (rb_node) {
+ node = container_of(rb_node, struct interval_tree_node, rb);
if (node->last >= addr) {
WARN(1, "Overlap with prev node and page fault addr\n");
return -EFAULT;
@@ -2908,6 +2885,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
uint32_t vmid, uint32_t node_id,
uint64_t addr, bool write_fault)
{
+ unsigned long start, last, size;
struct mm_struct *mm = NULL;
struct svm_range_list *svms;
struct svm_range *prange;
@@ -3043,40 +3021,44 @@ retry_write_locked:
kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
write_fault, timestamp);
- if (prange->actual_loc != best_loc) {
+ /* Align migration range start and size to granularity size */
+ size = 1UL << prange->granularity;
+ start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
+ last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
+ if (prange->actual_loc != 0 || best_loc != 0) {
migration = true;
+
if (best_loc) {
- r = svm_migrate_to_vram(prange, best_loc, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+ r = svm_migrate_to_vram(prange, best_loc, start, last,
+ mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
if (r) {
pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
r, addr);
/* Fallback to system memory if migration to
* VRAM failed
*/
- if (prange->actual_loc)
- r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
- NULL);
+ if (prange->actual_loc && prange->actual_loc != best_loc)
+ r = svm_migrate_vram_to_ram(prange, mm, start, last,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
else
r = 0;
}
} else {
- r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
- NULL);
+ r = svm_migrate_vram_to_ram(prange, mm, start, last,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
}
if (r) {
pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
- r, svms, prange->start, prange->last);
+ r, svms, start, last);
goto out_unlock_range;
}
}
- r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
+ r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
+ false, false);
if (r)
pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
- r, svms, prange->start, prange->last);
+ r, svms, start, last);
kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
migration);
@@ -3422,18 +3404,24 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
*migrated = false;
best_loc = svm_range_best_prefetch_location(prange);
- if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
- best_loc == prange->actual_loc)
+ /* when best_loc is a gpu node and the same as prange->actual_loc,
+ * we still need to do the migration, as prange->actual_loc != 0 does
+ * not mean all pages in prange are in vram. hmm migrate will pick
+ * up the right pages during migration.
+ */
+ if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
+ (best_loc == 0 && prange->actual_loc == 0))
return 0;
if (!best_loc) {
- r = svm_migrate_vram_to_ram(prange, mm,
+ r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
*migrated = !r;
return r;
}
- r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
+ r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
+ mm, KFD_MIGRATE_TRIGGER_PREFETCH);
*migrated = !r;
return r;
@@ -3441,13 +3429,14 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
{
- if (!fence)
- return -EINVAL;
-
- if (dma_fence_is_signaled(&fence->base))
- return 0;
-
- if (fence->svm_bo) {
+ /* Dereferencing fence->svm_bo is safe here because the fence hasn't
+ * signaled yet and we're under the protection of the fence->lock.
+ * After the fence is signaled in svm_range_bo_release, we cannot get
+ * here any more.
+ *
+ * Reference is dropped in svm_range_evict_svm_bo_worker.
+ */
+ if (svm_bo_ref_unless_zero(fence->svm_bo)) {
WRITE_ONCE(fence->svm_bo->evicting, 1);
schedule_work(&fence->svm_bo->eviction_work);
}
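Moving the reference acquisition out of the worker closes the race in which svm_bo could be freed between schedule_work() and the worker running. The underlying pattern, reduced to a generic sketch (svm_bo_ref_unless_zero() is presumably a kref_get_unless_zero() wrapper):

#include <linux/kref.h>
#include <linux/workqueue.h>

struct obj {
	struct kref refcount;
	struct work_struct work;
};

/* Take the reference *before* scheduling, while the caller's lock
 * (here: the fence lock) still pins the object; the worker merely
 * drops the reference when it finishes.
 */
static void schedule_obj_work(struct obj *o)
{
	if (kref_get_unless_zero(&o->refcount))
		schedule_work(&o->work);
}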
@@ -3462,8 +3451,6 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
int r = 0;
svm_bo = container_of(work, struct svm_range_bo, eviction_work);
- if (!svm_bo_ref_unless_zero(svm_bo))
- return; /* svm_bo was freed while eviction was pending */
if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
mm = svm_bo->eviction_fence->mm;
@@ -3488,7 +3475,11 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
+ /* migrate all vram pages in this prange to sys ram;
+ * after that, prange->actual_loc should be zero
+ */
r = svm_migrate_vram_to_ram(prange, mm,
+ prange->start, prange->last,
KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
} while (!r && prange->actual_loc && --retries);
@@ -3612,8 +3603,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
- r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
- true, true, flush_tlb);
+ r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+ MAX_GPU_INSTANCE, true, true, flush_tlb);
if (r)
pr_debug("failed %d to map svm range\n", r);
@@ -3627,8 +3618,8 @@ out_unlock_range:
pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
prange, prange->start, prange->last);
mutex_lock(&prange->migrate_mutex);
- r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
- true, true, prange->mapped_to_gpu);
+ r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+ MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
if (r)
pr_debug("failed %d on remap svm range\n", r);
mutex_unlock(&prange->migrate_mutex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index c528df1d0ba2..026863a0abcd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -78,6 +78,7 @@ struct svm_work_list_item {
* @update_list:link list node used to add to update_list
* @mapping: bo_va mapping structure to create and update GPU page table
* @npages: number of pages
+ * @vram_pages: number of vram pages in this svm_range
* @dma_addr: dma mapping address on each GPU for system memory physical page
* @ttm_res: vram ttm resource map
* @offset: range start offset within mm_nodes
@@ -88,7 +89,9 @@ struct svm_work_list_item {
* @flags: flags defined as KFD_IOCTL_SVM_FLAG_*
* @perferred_loc: perferred location, 0 for CPU, or GPU id
* @perfetch_loc: last prefetch location, 0 for CPU, or GPU id
- * @actual_loc: the actual location, 0 for CPU, or GPU id
+ * @actual_loc: the location of this svm_range. 0: all pages are in sys ram;
+ * GPU id: this svm_range may include vram pages from the GPU with
+ * id actual_loc.
* @granularity:migration granularity, log2 num pages
* @invalid: not 0 means cpu page table is invalidated
* @validate_timestamp: system timestamp when range is validated
@@ -112,6 +115,7 @@ struct svm_range {
struct list_head list;
struct list_head update_list;
uint64_t npages;
+ uint64_t vram_pages;
dma_addr_t *dma_addr[MAX_GPU_INSTANCE];
struct ttm_resource *ttm_res;
uint64_t offset;
@@ -168,9 +172,6 @@ struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange,
int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
bool clear);
void svm_range_vram_node_free(struct svm_range *prange);
-int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
- unsigned long addr, struct svm_range *parent,
- struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
uint32_t vmid, uint32_t node_id, uint64_t addr,
bool write_fault);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 057284bf50bb..e5f7c92eebcb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1342,10 +1342,11 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
num_cpu++;
}
+ if (list_empty(&kdev->io_link_props))
+ return -ENODATA;
+
gpu_link = list_first_entry(&kdev->io_link_props,
- struct kfd_iolink_properties, list);
- if (!gpu_link)
- return -ENOMEM;
+ struct kfd_iolink_properties, list);
for (i = 0; i < num_cpu; i++) {
/* CPU <--> GPU */
@@ -1423,15 +1424,17 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
peer->gpu->adev))
return ret;
+ if (list_empty(&kdev->io_link_props))
+ return -ENODATA;
+
iolink1 = list_first_entry(&kdev->io_link_props,
- struct kfd_iolink_properties, list);
- if (!iolink1)
- return -ENOMEM;
+ struct kfd_iolink_properties, list);
+
+ if (list_empty(&peer->io_link_props))
+ return -ENODATA;
iolink2 = list_first_entry(&peer->io_link_props,
- struct kfd_iolink_properties, list);
- if (!iolink2)
- return -ENOMEM;
+ struct kfd_iolink_properties, list);
props = kfd_alloc_struct(props);
if (!props)
@@ -1449,17 +1452,19 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
/* CPU->CPU link*/
cpu_dev = kfd_topology_device_by_proximity_domain(iolink1->node_to);
if (cpu_dev) {
- list_for_each_entry(iolink3, &cpu_dev->io_link_props, list)
- if (iolink3->node_to == iolink2->node_to)
- break;
-
- props->weight += iolink3->weight;
- props->min_latency += iolink3->min_latency;
- props->max_latency += iolink3->max_latency;
- props->min_bandwidth = min(props->min_bandwidth,
- iolink3->min_bandwidth);
- props->max_bandwidth = min(props->max_bandwidth,
- iolink3->max_bandwidth);
+ list_for_each_entry(iolink3, &cpu_dev->io_link_props, list) {
+ if (iolink3->node_to != iolink2->node_to)
+ continue;
+
+ props->weight += iolink3->weight;
+ props->min_latency += iolink3->min_latency;
+ props->max_latency += iolink3->max_latency;
+ props->min_bandwidth = min(props->min_bandwidth,
+ iolink3->min_bandwidth);
+ props->max_bandwidth = min(props->max_bandwidth,
+ iolink3->max_bandwidth);
+ break;
+ }
} else {
WARN(1, "CPU node not found");
}
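The two hunks above fix the same misuse twice: list_first_entry() never returns NULL — on an empty list it simply returns the head cast to the entry type — so the removed NULL checks were dead code. The correct idiom:

#include <linux/list.h>

/* Wrong: can never trigger, list_first_entry() is not NULL-returning.
 *
 *	link = list_first_entry(&kdev->io_link_props,
 *				struct kfd_iolink_properties, list);
 *	if (!link)
 *		return -ENOMEM;
 */

/* Right: test for emptiness first; then the first entry is valid. */
if (list_empty(&kdev->io_link_props))
	return -ENODATA;
link = list_first_entry(&kdev->io_link_props,
			struct kfd_iolink_properties, list);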
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 8bf94920d23e..ab2a97e354da 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -25,22 +25,25 @@
+ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM = \
amdgpu_dm.o \
amdgpu_dm_plane.o \
amdgpu_dm_crtc.o \
amdgpu_dm_irq.o \
amdgpu_dm_mst_types.o \
- amdgpu_dm_color.o
+ amdgpu_dm_color.o \
+ amdgpu_dm_services.o \
+ amdgpu_dm_helpers.o \
+ amdgpu_dm_pp_smu.o \
+ amdgpu_dm_psr.o \
+ amdgpu_dm_replay.o \
+ amdgpu_dm_wb.o
ifdef CONFIG_DRM_AMD_DC_FP
AMDGPUDM += dc_fpu.o
endif
-ifneq ($(CONFIG_DRM_AMD_DC),)
-AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o amdgpu_dm_replay.o
-endif
-
AMDGPUDM += amdgpu_dm_hdcp.o
ifneq ($(CONFIG_DEBUG_FS),)
@@ -52,3 +55,4 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
AMD_DISPLAY_FILES += $(AMDGPU_DM)
+endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d6e325c848eb..d4f525b66a09 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -37,6 +37,7 @@
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
+#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
@@ -54,6 +55,7 @@
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
+#include "amdgpu_dm_wb.h"
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
@@ -65,7 +67,6 @@
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
-#include "amdgpu_dm_replay.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -576,6 +577,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
+ struct drm_writeback_job *job;
struct amdgpu_crtc *acrtc;
unsigned long flags;
int vrr_active;
@@ -584,6 +586,33 @@ static void dm_crtc_high_irq(void *interrupt_params)
if (!acrtc)
return;
+ if (acrtc->wb_pending) {
+ if (acrtc->wb_conn) {
+ spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
+ job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
+ struct drm_writeback_job,
+ list_entry);
+ spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
+
+ if (job) {
+ unsigned int v_total, refresh_hz;
+ struct dc_stream_state *stream = acrtc->dm_irq_params.stream;
+
+ v_total = stream->adjust.v_total_max ?
+ stream->adjust.v_total_max : stream->timing.v_total;
+ refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
+ 100LL, (v_total * stream->timing.h_total));
+ mdelay(1000 / refresh_hz);
+
+ drm_writeback_signal_completion(acrtc->wb_conn, 0);
+ dc_stream_fc_disable_writeback(adev->dm.dc,
+ acrtc->dm_irq_params.stream, 0);
+ }
+ } else
+ DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__);
+ acrtc->wb_pending = false;
+ }
+
vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
drm_dbg_vbl(adev_to_drm(adev),
@@ -726,6 +755,10 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
if (notify->type == DMUB_NOTIFICATION_HPD)
@@ -949,6 +982,10 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->audio_inst != port)
continue;
@@ -1257,7 +1294,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
/* AGP aperture is disabled */
if (agp_bot > agp_top) {
logical_addr_low = adev->gmc.fb_start >> 18;
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+ AMD_APU_IS_RENOIR |
+ AMD_APU_IS_GREEN_SARDINE))
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
@@ -1269,7 +1308,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
logical_addr_high = adev->gmc.fb_end >> 18;
} else {
logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+ AMD_APU_IS_RENOIR |
+ AMD_APU_IS_GREEN_SARDINE))
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
@@ -1674,6 +1715,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
+ init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+
+ /* Enable DWB for tested platforms only */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
+ init_data.num_virtual_links = 1;
+
INIT_LIST_HEAD(&adev->dm.da_list);
retrieve_dmi_info(&adev->dm);
@@ -2251,6 +2298,10 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type == dc_connection_mst_branch &&
aconnector->mst_mgr.aux) {
@@ -2379,6 +2430,10 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_root)
@@ -2558,12 +2613,10 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
memset(del_streams, 0, sizeof(del_streams));
- context = dc_create_state(dc);
+ context = dc_state_create_current_copy(dc);
if (context == NULL)
goto context_alloc_fail;
- dc_resource_state_copy_construct_current(dc, context);
-
/* First remove from context all streams */
for (i = 0; i < context->stream_count; i++) {
struct dc_stream_state *stream = context->streams[i];
@@ -2573,12 +2626,12 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
/* Remove all planes for removed streams and then remove the streams */
for (i = 0; i < del_streams_count; i++) {
- if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+ if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
res = DC_FAIL_DETACH_SURFACES;
goto fail;
}
- res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
+ res = dc_state_remove_stream(dc, context, del_streams[i]);
if (res != DC_OK)
goto fail;
}
@@ -2586,7 +2639,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
res = dc_commit_streams(dc, context->streams, context->stream_count);
fail:
- dc_release_state(context);
+ dc_state_release(context);
context_alloc_fail:
return res;
@@ -2613,7 +2666,7 @@ static int dm_suspend(void *handle)
dc_allow_idle_optimizations(adev->dm.dc, false);
- dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
+ dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
@@ -2638,11 +2691,12 @@ static int dm_suspend(void *handle)
hpd_rx_irq_work_suspend(dm);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
return 0;
}
-struct amdgpu_dm_connector *
+struct drm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
@@ -2655,7 +2709,7 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
crtc_from_state = new_con_state->crtc;
if (crtc_from_state == crtc)
- return to_amdgpu_dm_connector(connector);
+ return connector;
}
return NULL;
@@ -2806,7 +2860,7 @@ static int dm_resume(void *handle)
bool need_hotplug = false;
if (dm->dc->caps.ips_support) {
- dc_dmub_srv_exit_low_power_state(dm->dc);
+ dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
}
if (amdgpu_in_reset(adev)) {
@@ -2833,6 +2887,7 @@ static int dm_resume(void *handle)
if (r)
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
dc_resume(dm->dc);
@@ -2858,7 +2913,7 @@ static int dm_resume(void *handle)
dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
- dc_release_state(dm->cached_dc_state);
+ dc_state_release(dm->cached_dc_state);
dm->cached_dc_state = NULL;
amdgpu_dm_irq_resume_late(adev);
@@ -2868,10 +2923,9 @@ static int dm_resume(void *handle)
return 0;
}
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
- dc_release_state(dm_state->context);
- dm_state->context = dc_create_state(dm->dc);
+ dc_state_release(dm_state->context);
+ dm_state->context = dc_state_create(dm->dc);
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
- dc_resource_state_construct(dm->dc, dm_state->context);
/* Before powering on DC we need to re-initialize DMUB. */
dm_dmub_hw_resume(adev);
@@ -2883,6 +2937,7 @@ static int dm_resume(void *handle)
}
/* power on hardware */
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
/* program HPD filter */
@@ -2900,6 +2955,10 @@ static int dm_resume(void *handle)
/* Do detection*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (!aconnector->dc_link)
@@ -3473,6 +3532,9 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
list_for_each_entry(connector,
&dev->mode_config.connector_list, head) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
dc_link = aconnector->dc_link;
@@ -3939,7 +4001,7 @@ dm_atomic_duplicate_state(struct drm_private_obj *obj)
old_state = to_dm_atomic_state(obj->state);
if (old_state && old_state->context)
- new_state->context = dc_copy_state(old_state->context);
+ new_state->context = dc_state_create_copy(old_state->context);
if (!new_state->context) {
kfree(new_state);
@@ -3955,7 +4017,7 @@ static void dm_atomic_destroy_state(struct drm_private_obj *obj,
struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
if (dm_state && dm_state->context)
- dc_release_state(dm_state->context);
+ dc_state_release(dm_state->context);
kfree(dm_state);
}
@@ -3991,14 +4053,12 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
if (!state)
return -ENOMEM;
- state->context = dc_create_state(adev->dm.dc);
+ state->context = dc_state_create_current_copy(adev->dm.dc);
if (!state->context) {
kfree(state);
return -ENOMEM;
}
- dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
-
drm_atomic_private_obj_init(adev_to_drm(adev),
&adev->dm.atomic_obj,
&state->base,
@@ -4006,14 +4066,19 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
r = amdgpu_display_modeset_create_props(adev);
if (r) {
- dc_release_state(state->context);
+ dc_state_release(state->context);
kfree(state);
return r;
}
+#ifdef AMD_PRIVATE_COLOR
+ if (amdgpu_dm_create_color_properties(adev))
+ return -ENOMEM;
+#endif
+
r = amdgpu_dm_audio_init(adev);
if (r) {
- dc_release_state(state->context);
+ dc_state_release(state->context);
kfree(state);
return r;
}
@@ -4327,7 +4392,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
bool psr_feature_enabled = false;
- bool replay_feature_enabled = false;
int max_overlay = dm->dc->caps.max_slave_planes;
dm->display_indexes_num = dm->dc->caps.max_streams;
@@ -4439,20 +4503,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
}
- if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
- switch (adev->ip_versions[DCE_HWIP][0]) {
- case IP_VERSION(3, 1, 4):
- case IP_VERSION(3, 1, 5):
- case IP_VERSION(3, 1, 6):
- case IP_VERSION(3, 2, 0):
- case IP_VERSION(3, 2, 1):
- replay_feature_enabled = true;
- break;
- default:
- replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
- break;
- }
- }
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
@@ -4464,6 +4514,28 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
continue;
}
+ link = dc_get_link_at_index(dm->dc, i);
+
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
+ struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
+
+ if (!wbcon) {
+ DRM_ERROR("KMS: Failed to allocate writeback connector\n");
+ continue;
+ }
+
+ if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
+ DRM_ERROR("KMS: Failed to initialize writeback connector\n");
+ kfree(wbcon);
+ continue;
+ }
+
+ link->psr_settings.psr_feature_enabled = false;
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+ continue;
+ }
+
aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
if (!aconnector)
goto fail;
@@ -4482,8 +4554,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
- link = dc_get_link_at_index(dm->dc, i);
-
if (!dc_link_detect_connection_type(link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
@@ -4501,12 +4571,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_dm_update_connector_after_detect(aconnector);
setup_backlight_device(dm, aconnector);
- /*
- * Disable psr if replay can be enabled
- */
- if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector))
- psr_feature_enabled = false;
-
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
@@ -5088,7 +5152,9 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
* Always set input transfer function, since plane state is refreshed
* every time.
*/
- ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
+ ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state,
+ plane_state,
+ dc_plane_state);
if (ret)
return ret;
@@ -5164,6 +5230,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
if (plane->type == DRM_PLANE_TYPE_CURSOR)
return;
+ if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
+ goto ffu;
+
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
clips = drm_plane_get_damage_clips(new_plane_state);
@@ -5490,10 +5559,13 @@ static void fill_stream_properties_from_drm_display_mode(
{
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *aconnector = NULL;
struct hdmi_vendor_infoframe hv_frame;
struct hdmi_avi_infoframe avi_frame;
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ aconnector = to_amdgpu_dm_connector(connector);
+
memset(&hv_frame, 0, sizeof(hv_frame));
memset(&avi_frame, 0, sizeof(avi_frame));
@@ -5506,6 +5578,7 @@ static void fill_stream_properties_from_drm_display_mode(
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
else if (drm_mode_is_420_also(info, mode_in)
+ && aconnector
&& aconnector->force_yuv420_output)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
@@ -5541,7 +5614,7 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->hdmi_vic = hv_frame.vic;
}
- if (is_freesync_video_mode(mode_in, aconnector)) {
+ if (aconnector && is_freesync_video_mode(mode_in, aconnector)) {
timing_out->h_addressable = mode_in->hdisplay;
timing_out->h_total = mode_in->htotal;
timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
@@ -5662,13 +5735,13 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
}
static struct dc_sink *
-create_fake_sink(struct amdgpu_dm_connector *aconnector)
+create_fake_sink(struct dc_link *link)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;
- sink_init_data.link = aconnector->dc_link;
- sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = link->connector_signal;
sink = dc_sink_create(&sink_init_data);
if (!sink) {
@@ -6018,14 +6091,14 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
}
static struct dc_stream_state *
-create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+create_stream_for_sink(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
const struct dm_connector_state *dm_state,
const struct dc_stream_state *old_stream,
int requested_bpc)
{
+ struct amdgpu_dm_connector *aconnector = NULL;
struct drm_display_mode *preferred_mode = NULL;
- struct drm_connector *drm_connector;
const struct drm_connector_state *con_state = &dm_state->base;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode;
@@ -6039,22 +6112,35 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
struct dsc_dec_dpcd_caps dsc_caps;
+ struct dc_link *link = NULL;
struct dc_sink *sink = NULL;
drm_mode_init(&mode, drm_mode);
memset(&saved_mode, 0, sizeof(saved_mode));
- if (aconnector == NULL) {
- DRM_ERROR("aconnector is NULL!\n");
+ if (connector == NULL) {
+ DRM_ERROR("connector is NULL!\n");
return stream;
}
- drm_connector = &aconnector->base;
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
+ aconnector = NULL;
+ aconnector = to_amdgpu_dm_connector(connector);
+ link = aconnector->dc_link;
+ } else {
+ struct drm_writeback_connector *wbcon = NULL;
+ struct amdgpu_dm_wb_connector *dm_wbcon = NULL;
+
+ wbcon = drm_connector_to_writeback(connector);
+ dm_wbcon = to_amdgpu_dm_wb_connector(wbcon);
+ link = dm_wbcon->link;
+ }
- if (!aconnector->dc_sink) {
- sink = create_fake_sink(aconnector);
+ if (!aconnector || !aconnector->dc_sink) {
+ sink = create_fake_sink(link);
if (!sink)
return stream;
+
} else {
sink = aconnector->dc_sink;
dc_sink_retain(sink);
@@ -6067,12 +6153,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
goto finish;
}
+ /* We leave this NULL for writeback connectors */
stream->dm_stream_context = aconnector;
stream->timing.flags.LTE_340MCSC_SCRAMBLE =
- drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
+ connector->display_info.hdmi.scdc.scrambling.low_rates;
- list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
+ list_for_each_entry(preferred_mode, &connector->modes, head) {
/* Search for preferred mode */
if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
native_mode_found = true;
@@ -6081,7 +6168,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
}
if (!native_mode_found)
preferred_mode = list_first_entry_or_null(
- &aconnector->base.modes,
+ &connector->modes,
struct drm_display_mode,
head);
@@ -6095,7 +6182,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
* and the modelist may not be filled in time.
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
- } else {
+ } else if (aconnector) {
recalculate_timing = is_freesync_video_mode(&mode, aconnector);
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
@@ -6118,13 +6205,17 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/
if (!scale || mode_refresh != preferred_refresh)
fill_stream_properties_from_drm_display_mode(
- stream, &mode, &aconnector->base, con_state, NULL,
+ stream, &mode, connector, con_state, NULL,
requested_bpc);
else
fill_stream_properties_from_drm_display_mode(
- stream, &mode, &aconnector->base, con_state, old_stream,
+ stream, &mode, connector, con_state, old_stream,
requested_bpc);
+ /* The rest isn't needed for writeback connectors */
+ if (!aconnector)
+ goto finish;
+
if (aconnector->timing_changed) {
drm_dbg(aconnector->base.dev,
"overriding timing for automated test, bpc %d, changing to %d\n",
@@ -6142,15 +6233,16 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
fill_audio_info(
&stream->audio_info,
- drm_connector,
+ connector,
sink);
update_stream_signal(stream, sink);
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
-
- if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
+ else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ stream->signal == SIGNAL_TYPE_EDP) {
//
// Decide whether the stream supports VSC SDP colorimetry
// before building the VSC info packet.
@@ -6166,8 +6258,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
- aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+ if (stream->link->psr_settings.psr_feature_enabled)
+ aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
}
finish:
dc_sink_release(sink);
@@ -6547,7 +6640,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
if (!dc_plane_state)
goto cleanup;
- dc_state = dc_create_state(dc);
+ dc_state = dc_state_create(dc);
if (!dc_state)
goto cleanup;
@@ -6574,9 +6667,9 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
dc_result = dc_validate_plane(dc, dc_plane_state);
if (dc_result == DC_OK)
- dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);
+ dc_result = dc_state_add_stream(dc, dc_state, stream);
- if (dc_result == DC_OK && !dc_add_plane_to_context(
+ if (dc_result == DC_OK && !dc_state_add_plane(
dc,
stream,
dc_plane_state,
@@ -6588,7 +6681,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
cleanup:
if (dc_state)
- dc_release_state(dc_state);
+ dc_state_release(dc_state);
if (dc_plane_state)
dc_plane_state_release(dc_plane_state);
@@ -6610,7 +6703,7 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
enum dc_status dc_result = DC_OK;
do {
- stream = create_stream_for_sink(aconnector, drm_mode,
+ stream = create_stream_for_sink(connector, drm_mode,
dm_state, old_stream,
requested_bpc);
if (stream == NULL) {
@@ -6618,6 +6711,9 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
break;
}
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return stream;
+
dc_result = dc_validate_stream(adev->dm.dc, stream);
if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
@@ -6893,8 +6989,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- if (!mst_state->pbn_div.full)
- mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
+ mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
@@ -6938,6 +7033,9 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (!aconnector->mst_output_port)
@@ -7517,7 +7615,6 @@ create_i2c(struct ddc_service *ddc_service,
if (!i2c)
return NULL;
i2c->base.owner = THIS_MODULE;
- i2c->base.class = I2C_CLASS_DDC;
i2c->base.dev.parent = &adev->pdev->dev;
i2c->base.algo = &amdgpu_dm_i2c_algo;
snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
@@ -7543,6 +7640,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct dc_link *link = dc_get_link_at_index(dc, link_index);
struct amdgpu_i2c_adapter *i2c;
+ /* Not needed for writeback connector */
link->priv = aconnector;
@@ -8153,6 +8251,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
+ bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult;
+ bundle->surface_updates[planes_count].func_shaper = dc_plane->in_shaper_func;
+ bundle->surface_updates[planes_count].lut3d_func = dc_plane->lut3d_func;
+ bundle->surface_updates[planes_count].blend_tf = dc_plane->blend_tf;
}
amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
@@ -8364,6 +8466,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
&acrtc_state->stream->csc_color_matrix;
bundle->stream_update.out_transfer_func =
acrtc_state->stream->out_transfer_func;
+ bundle->stream_update.lut3d_func =
+ (struct dc_3dlut *) acrtc_state->stream->lut3d_func;
+ bundle->stream_update.func_shaper =
+ (struct dc_transfer_func *) acrtc_state->stream->func_shaper;
}
acrtc_state->stream->abm_level = acrtc_state->abm_level;
@@ -8497,6 +8603,9 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
notify:
aconnector = to_amdgpu_dm_connector(connector);
@@ -8530,6 +8639,9 @@ notify:
if (!status)
continue;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
mutex_lock(&adev->dm.audio_lock);
@@ -8555,6 +8667,12 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat
stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
+static void dm_clear_writeback(struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *crtc_state)
+{
+ dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0);
+}
+
static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
struct dc_state *dc_state)
{
@@ -8564,9 +8682,38 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct drm_connector_state *old_con_state;
+ struct drm_connector *connector;
bool mode_set_reset_required = false;
u32 i;
+ /* Disable writeback */
+ for_each_old_connector_in_state(state, connector, old_con_state, i) {
+ struct dm_connector_state *dm_old_con_state;
+ struct amdgpu_crtc *acrtc;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ dm_old_con_state = to_dm_connector_state(old_con_state);
+ if (!dm_old_con_state->base.crtc)
+ continue;
+
+ acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc);
+ if (!acrtc->wb_enabled)
+ continue;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ dm_clear_writeback(dm, dm_old_crtc_state);
+ acrtc->wb_enabled = false;
+ }
+
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -8691,7 +8838,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
dc_stream_get_status(dm_new_crtc_state->stream);
if (!status)
- status = dc_stream_get_status_from_state(dc_state,
+ status = dc_state_get_stream_status(dc_state,
dm_new_crtc_state->stream);
if (!status)
drm_err(dev,
@@ -8703,6 +8850,105 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
}
}
+static void dm_set_writeback(struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *crtc_state,
+ struct drm_connector *connector,
+ struct drm_connector_state *new_con_state)
+{
+ struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
+ struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc;
+ struct dc_writeback_info *wb_info;
+ struct pipe_ctx *pipe = NULL;
+ struct amdgpu_framebuffer *afb;
+ int i = 0;
+
+ wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
+ if (!wb_info) {
+ DRM_ERROR("Failed to allocate wb_info\n");
+ return;
+ }
+
+ acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
+ if (!acrtc) {
+ DRM_ERROR("no amdgpu_crtc found\n");
+ kfree(wb_info);
+ return;
+ }
+
+ afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
+ if (!afb) {
+ DRM_ERROR("No amdgpu_framebuffer found\n");
+ kfree(wb_info);
+ return;
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
+ pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+
+ if (!pipe) {
+ DRM_ERROR("no pipe_ctx found for the writeback stream\n");
+ kfree(wb_info);
+ return;
+ }
+
+ /* fill in wb_info */
+ wb_info->wb_enabled = true;
+
+ wb_info->dwb_pipe_inst = 0;
+ wb_info->dwb_params.dwbscl_black_color = 0;
+ wb_info->dwb_params.hdr_mult = 0x1F000;
+ wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS;
+ wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13;
+ wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC;
+ wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC;
+
+ /* width & height from crtc */
+ wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay;
+ wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay;
+ wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay;
+ wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay;
+
+ wb_info->dwb_params.cnv_params.crop_en = false;
+ wb_info->dwb_params.stereo_params.stereo_enabled = false;
+
+ wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits
+ wb_info->dwb_params.cnv_params.out_min_pix_val = 0;
+ wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB;
+ wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS;
+
+ wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444;
+
+ wb_info->dwb_params.capture_rate = dwb_capture_rate_0;
+
+ wb_info->dwb_params.scaler_taps.h_taps = 4;
+ wb_info->dwb_params.scaler_taps.v_taps = 4;
+ wb_info->dwb_params.scaler_taps.h_taps_c = 2;
+ wb_info->dwb_params.scaler_taps.v_taps_c = 2;
+ wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING;
+
+ wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0];
+ wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1];
+
+ for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) {
+ wb_info->mcif_buf_params.luma_address[i] = afb->address;
+ wb_info->mcif_buf_params.chroma_address[i] = 0;
+ }
+
+ wb_info->mcif_buf_params.p_vmid = 1;
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) {
+ wb_info->mcif_warmup_params.start_address.quad_part = afb->address;
+ wb_info->mcif_warmup_params.region_size =
+ wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height;
+ }
+ wb_info->mcif_warmup_params.p_vmid = 1;
+ wb_info->writeback_source_plane = pipe->plane_state;
+
+ dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info);
+
+ acrtc->wb_pending = true;
+ acrtc->wb_conn = wb_conn;
+ drm_writeback_queue_job(wb_conn, new_con_state);
+}
+
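For reference, the job queued by dm_set_writeback() above originates from the core DRM writeback UAPI: userspace binds the writeback connector to a CRTC and hands it a destination framebuffer through the standard "WRITEBACK_FB_ID" and "WRITEBACK_OUT_FENCE_PTR" connector properties. Below is a minimal libdrm sketch of that flow; the object IDs and the lookup_prop_id() helper are placeholders, not part of this patch.

    #include <errno.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Hypothetical helper: resolve a DRM property ID by name on an object. */
    uint32_t lookup_prop_id(int fd, uint32_t obj_id, uint32_t obj_type, const char *name);

    int queue_writeback_job(int fd, uint32_t wb_conn_id, uint32_t crtc_id, uint32_t fb_id)
    {
    	drmModeAtomicReq *req = drmModeAtomicAlloc();
    	int out_fence = -1;
    	int ret;

    	if (!req)
    		return -ENOMEM;

    	/* Bind the writeback connector to the CRTC being captured. */
    	drmModeAtomicAddProperty(req, wb_conn_id,
    				 lookup_prop_id(fd, wb_conn_id, DRM_MODE_OBJECT_CONNECTOR, "CRTC_ID"),
    				 crtc_id);
    	/* The destination FB; this becomes new_con_state->writeback_job->fb. */
    	drmModeAtomicAddProperty(req, wb_conn_id,
    				 lookup_prop_id(fd, wb_conn_id, DRM_MODE_OBJECT_CONNECTOR, "WRITEBACK_FB_ID"),
    				 fb_id);
    	/* A fence fd is written back here once the DWB capture has completed. */
    	drmModeAtomicAddProperty(req, wb_conn_id,
    				 lookup_prop_id(fd, wb_conn_id, DRM_MODE_OBJECT_CONNECTOR, "WRITEBACK_OUT_FENCE_PTR"),
    				 (uint64_t)(uintptr_t)&out_fence);

    	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
    	drmModeAtomicFree(req);
    	return ret ? ret : out_fence;
    }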
/**
* amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
* @state: The atomic state to commit
@@ -8735,7 +8981,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (new_con_state->crtc &&
new_con_state->crtc->state->active &&
drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) {
- dc_dmub_srv_exit_low_power_state(dm->dc);
+ dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
break;
}
}
@@ -8753,7 +8999,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *aconnector;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
if (!adev->dm.hdcp_workqueue)
continue;
@@ -9030,6 +9281,31 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
}
+ /* Enable writeback */
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ if (!new_con_state->writeback_job)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (acrtc->wb_enabled)
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state);
+ acrtc->wb_enabled = true;
+ }
+
/* Update audio instances for each connector. */
amdgpu_dm_commit_audio(dev, state);
@@ -9147,10 +9423,15 @@ out:
void dm_restore_drm_connector_state(struct drm_device *dev,
struct drm_connector *connector)
{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *aconnector;
struct amdgpu_crtc *disconnected_acrtc;
struct dm_crtc_state *acrtc_state;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
if (!aconnector->dc_sink || !connector->state || !connector->encoder)
return;
@@ -9227,12 +9508,16 @@ static void get_freesync_config_for_crtc(
struct dm_connector_state *new_con_state)
{
struct mod_freesync_config config = {0};
- struct amdgpu_dm_connector *aconnector =
- to_amdgpu_dm_connector(new_con_state->base.connector);
+ struct amdgpu_dm_connector *aconnector;
struct drm_display_mode *mode = &new_crtc_state->base.mode;
int vrefresh = drm_mode_vrefresh(mode);
bool fs_vid_mode = false;
+ if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return;
+
+ aconnector = to_amdgpu_dm_connector(new_con_state->base.connector);
+
new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
vrefresh >= aconnector->min_vfreq &&
vrefresh <= aconnector->max_vfreq;
@@ -9332,6 +9617,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
* update changed items
*/
struct amdgpu_crtc *acrtc = NULL;
+ struct drm_connector *connector = NULL;
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
@@ -9341,15 +9627,17 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
acrtc = to_amdgpu_crtc(crtc);
- aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
+ connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
+ if (connector)
+ aconnector = to_amdgpu_dm_connector(connector);
/* TODO This hack should go away */
- if (aconnector && enable) {
+ if (connector && enable) {
/* Make sure fake sink is created in plug-in scenario */
drm_new_conn_state = drm_atomic_get_new_connector_state(state,
- &aconnector->base);
+ connector);
drm_old_conn_state = drm_atomic_get_old_connector_state(state,
- &aconnector->base);
+ connector);
if (IS_ERR(drm_new_conn_state)) {
ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
@@ -9475,7 +9763,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
crtc->base.id);
/* i.e. reset mode */
- if (dc_remove_stream_from_ctx(
+ if (dc_state_remove_stream(
dm->dc,
dm_state->context,
dm_old_crtc_state->stream) != DC_OK) {
@@ -9496,7 +9784,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
* added MST connectors not found in existing crtc_state in the chained mode
* TODO: need to dig out the root cause of that
*/
- if (!aconnector)
+ if (!connector)
goto skip_modeset;
if (modereset_required(new_crtc_state))
@@ -9518,7 +9806,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
crtc->base.id);
- if (dc_add_stream_to_ctx(
+ if (dc_state_add_stream(
dm->dc,
dm_state->context,
dm_new_crtc_state->stream) != DC_OK) {
@@ -9539,7 +9827,7 @@ skip_modeset:
* We want to do dc stream updates that do not require a
* full modeset below.
*/
- if (!(enable && aconnector && new_crtc_state->active))
+ if (!(enable && connector && new_crtc_state->active))
return 0;
/*
* Given above conditions, the dc state cannot be NULL because:
@@ -9565,6 +9853,7 @@ skip_modeset:
* when a modeset is needed, to ensure it gets reprogrammed.
*/
if (dm_new_crtc_state->base.color_mgmt_changed ||
+ dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
if (ret)
@@ -9598,7 +9887,8 @@ static bool should_reset_plane(struct drm_atomic_state *state,
* TODO: Remove this hack for all asics once it proves that the
* fast updates works fine on DCN3.2+.
*/
- if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) &&
+ state->allow_modeset)
return true;
/* Exit early if we know that we're adding or removing the plane. */
@@ -9632,6 +9922,10 @@ static bool should_reset_plane(struct drm_atomic_state *state,
*/
for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
struct amdgpu_framebuffer *old_afb, *new_afb;
+ struct dm_plane_state *dm_new_other_state, *dm_old_other_state;
+
+ dm_new_other_state = to_dm_plane_state(new_other_state);
+ dm_old_other_state = to_dm_plane_state(old_other_state);
if (other->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -9668,6 +9962,18 @@ static bool should_reset_plane(struct drm_atomic_state *state,
old_other_state->color_encoding != new_other_state->color_encoding)
return true;
+ /* HDR/Transfer Function changes. */
+ if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf ||
+ dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut ||
+ dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult ||
+ dm_old_other_state->ctm != dm_new_other_state->ctm ||
+ dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut ||
+ dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf ||
+ dm_old_other_state->lut3d != dm_new_other_state->lut3d ||
+ dm_old_other_state->blend_lut != dm_new_other_state->blend_lut ||
+ dm_old_other_state->blend_tf != dm_new_other_state->blend_tf)
+ return true;
+
/* Framebuffer checks fall at the end. */
if (!old_other_state->fb || !new_other_state->fb)
continue;
@@ -9822,7 +10128,7 @@ static int dm_update_plane_state(struct dc *dc,
if (ret)
return ret;
- if (!dc_remove_plane_from_context(
+ if (!dc_state_remove_plane(
dc,
dm_old_crtc_state->stream,
dm_old_plane_state->dc_state,
@@ -9900,7 +10206,7 @@ static int dm_update_plane_state(struct dc *dc,
* state. It'll be released when the atomic state is
* cleaned.
*/
- if (!dc_add_plane_to_context(
+ if (!dc_state_add_plane(
dc,
dm_new_crtc_state->stream,
dc_new_plane_state,
@@ -10062,6 +10368,9 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
if (conn_state->crtc != crtc)
continue;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (!aconnector->mst_output_port || !aconnector->mst_root)
aconnector = NULL;
@@ -10581,7 +10890,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
input->cea_total_length = total_length;
memcpy(input->payload, data, length);
- res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+ res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
if (!res) {
DRM_ERROR("EDID CEA parser failed\n");
return false;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 3d480be802cb..9c1871b866cc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -32,6 +32,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>
#include "link_service_types.h"
+#include <drm/drm_writeback.h>
/*
* This file contains the definition for amdgpu_display_manager
@@ -54,6 +55,9 @@
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3 0x3
+
+#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
+
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
@@ -714,11 +718,107 @@ static inline void amdgpu_dm_set_mst_status(uint8_t *status,
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
+struct amdgpu_dm_wb_connector {
+ struct drm_writeback_connector base;
+ struct dc_link *link;
+};
+
+#define to_amdgpu_dm_wb_connector(x) container_of(x, struct amdgpu_dm_wb_connector, base)
+
extern const struct amdgpu_ip_block_version dm_ip_block;
+/* enum amdgpu_transfer_function: pre-defined transfer functions supported by AMD.
+ *
+ * It includes standardized transfer functions and pure power functions. The
+ * transfer function coefficients are available at modules/color/color_gamma.c
+ */
+enum amdgpu_transfer_function {
+ AMDGPU_TRANSFER_FUNCTION_DEFAULT,
+ AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF,
+ AMDGPU_TRANSFER_FUNCTION_PQ_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_IDENTITY,
+ AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_BT709_OETF,
+ AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_COUNT
+};
+
struct dm_plane_state {
struct drm_plane_state base;
struct dc_plane_state *dc_state;
+
+ /* Plane color mgmt */
+ /**
+ * @degamma_lut:
+ *
+ * 1D LUT for mapping framebuffer/plane pixel data before sampling or
+ * blending operations. It's usually applied to linearize input space.
+ * The blob (if not NULL) is an array of &struct drm_color_lut.
+ */
+ struct drm_property_blob *degamma_lut;
+ /**
+ * @degamma_tf:
+ *
+ * Predefined transfer function to tell DC driver the input space to
+ * linearize.
+ */
+ enum amdgpu_transfer_function degamma_tf;
+ /**
+ * @hdr_mult:
+ *
+ * Multiplier to 'gain' the plane. When PQ is decoded using the fixed
+ * func transfer function to the internal FP16 fb, 1.0 -> 80 nits (on
+ * AMD at least). When sRGB is decoded, 1.0 -> 1.0, obviously.
+ * Therefore, a 1.0 multiplier maps SDR content to 80 nits. If you
+ * want 203 nits for SDR content, pass in (203.0 / 80.0). The format
+ * is S31.32 sign-magnitude.
+ *
+ * The HDR multiplier can range well beyond [0.0, 1.0]. This means that
+ * a PQ TF is needed for any subsequent linear-to-non-linear transforms.
+ */
+ __u64 hdr_mult;
+ /**
+ * @ctm:
+ *
+ * Color transformation matrix. The blob (if not NULL) is a &struct
+ * drm_color_ctm_3x4.
+ */
+ struct drm_property_blob *ctm;
+ /**
+ * @shaper_lut: shaper lookup table blob. The blob (if not NULL) is an
+ * array of &struct drm_color_lut.
+ */
+ struct drm_property_blob *shaper_lut;
+ /**
+ * @shaper_tf:
+ *
+ * Predefined transfer function to delinearize color space.
+ */
+ enum amdgpu_transfer_function shaper_tf;
+ /**
+ * @lut3d: 3D lookup table blob. The blob (if not NULL) is an array of
+ * &struct drm_color_lut.
+ */
+ struct drm_property_blob *lut3d;
+ /**
+ * @blend_lut: blend lut lookup table blob. The blob (if not NULL) is an
+ * array of &struct drm_color_lut.
+ */
+ struct drm_property_blob *blend_lut;
+ /**
+ * @blend_tf:
+ *
+ * Pre-defined transfer function for converting plane pixel data before
+ * applying blend LUT.
+ */
+ enum amdgpu_transfer_function blend_tf;
};
struct dm_crtc_state {
@@ -743,6 +843,14 @@ struct dm_crtc_state {
struct dc_info_packet vrr_infopacket;
int abm_level;
+
+ /**
+ * @regamma_tf:
+ *
+ * Pre-defined transfer function for converting internal FB -> wire
+ * encoding.
+ */
+ enum amdgpu_transfer_function regamma_tf;
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
@@ -804,14 +912,22 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
+/* 3D LUT max size is 17x17x17 (4913 entries) */
+#define MAX_COLOR_3DLUT_SIZE 17
+#define MAX_COLOR_3DLUT_BITDEPTH 12
+int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev,
+ struct drm_plane_state *plane_state);
+/* 1D LUT size */
#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
void amdgpu_dm_init_color_mod(void);
+int amdgpu_dm_create_color_properties(struct amdgpu_device *adev);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
+ struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state);
void amdgpu_dm_update_connector_after_detect(
@@ -834,7 +950,7 @@ struct dc_stream_state *
int dm_atomic_get_state(struct drm_atomic_state *state,
struct dm_atomic_state **dm_state);
-struct amdgpu_dm_connector *
+struct drm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index a4cb23d059bd..c87b64e464ed 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -72,6 +72,7 @@
*/
#define MAX_DRM_LUT_VALUE 0xFFFF
+#define SDR_WHITE_LEVEL_INIT_VALUE 80
/**
* amdgpu_dm_init_color_mod - Initialize the color module.
@@ -84,6 +85,247 @@ void amdgpu_dm_init_color_mod(void)
setup_x_points_distribution();
}
+static inline struct fixed31_32 amdgpu_dm_fixpt_from_s3132(__u64 x)
+{
+ struct fixed31_32 val;
+
+ /* If negative, convert to 2's complement. */
+ if (x & (1ULL << 63))
+ x = -(x & ~(1ULL << 63));
+
+ val.value = x;
+ return val;
+}
+
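To make the S31.32 sign-magnitude layout concrete — bit 63 is the sign, bits 62-32 the integer part, bits 31-0 the fraction, matching the conversion above — here is a small illustrative encoder (a sketch, not part of the patch):

    #include <stdint.h>

    /* Encode a non-negative rational n/d as S31.32 sign-magnitude. */
    static uint64_t s3132_from_ratio(uint64_t n, uint64_t d)
    {
    	return (n << 32) / d; /* 32 fractional bits; the sign bit stays clear */
    }

    /*
     * s3132_from_ratio(1, 1)    == 0x100000000 -> 1.0 (AMDGPU_HDR_MULT_DEFAULT)
     * s3132_from_ratio(203, 80) == 0x289999999 -> ~2.5375 (203-nit SDR white)
     */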
+#ifdef AMD_PRIVATE_COLOR
+/* Pre-defined Transfer Functions (TF)
+ *
+ * AMD driver supports pre-defined mathematical functions for transferring
+ * between encoded values and optical/linear space. Depending on HW color caps,
+ * ROMs and curves built by the AMD color module support these transforms.
+ *
+ * The driver-specific color implementation exposes properties for pre-blending
+ * degamma TF, shaper TF (before 3D LUT), blend (dpp.ogam) TF and
+ * post-blending regamma (mpc.ogam) TF. However, only pre-blending degamma
+ * supports ROM curves. The AMD color module uses pre-defined coefficients to
+ * build curves for the other blocks. What can be done by each color block is
+ * described by struct dpp_color_caps and struct mpc_color_caps.
+ *
+ * AMD driver-specific color API exposes the following pre-defined transfer
+ * functions:
+ *
+ * - Identity: linear/identity relationship between pixel value and
+ * luminance value;
+ * - Gamma 2.2, Gamma 2.4, Gamma 2.6: pure power functions;
+ * - sRGB: the piece-wise transfer function from IEC 61966-2-1:1999;
+ * - BT.709: has a linear segment in the bottom part and then a power function
+ * with a 0.45 (~1/2.22) gamma for the rest of the range; standardized by
+ * ITU-R BT.709-6;
+ * - PQ (Perceptual Quantizer): used for HDR display, allows luminance range
+ * capability of 0 to 10,000 nits; standardized by SMPTE ST 2084.
+ *
+ * The AMD color model is designed with an assumption that SDR (sRGB, BT.709,
+ * Gamma 2.2, etc.) peak white maps (normalized to 1.0 FP) to 80 nits in the PQ
+ * system. This has the implication that PQ EOTF (non-linear to linear) maps to
+ * [0.0..125.0] where 125.0 = 10,000 nits / 80 nits.
+ *
+ * Non-linear and linear forms are described in the table below:
+ *
+ * ┌───────────┬─────────────────────┬──────────────────────┐
+ * │ │ Non-linear │ Linear │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ sRGB │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ BT709 │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ Gamma 2.x │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ PQ │ UNORM or FP16 CCCS* │ [0.0, 125.0] │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ Identity │ UNORM or FP16 CCCS* │ [0.0, 1.0] or CCCS** │
+ * └───────────┴─────────────────────┴──────────────────────┘
+ * * CCCS: Windows canonical composition color space
+ * ** Respectively
+ *
+ * In the driver-specific API, color block names attached to TF properties
+ * suggest the intended handling of non-linearly encoded pixel luminance
+ * values. As some newer encodings don't use a gamma curve, we make encoding and
+ * decoding explicit by defining an enum list of transfer functions supported
+ * in terms of EOTF and inverse EOTF, where:
+ *
+ * - EOTF (electro-optical transfer function): is the transfer function to go
+ * from the encoded value to an optical (linear) value. De-gamma functions
+ * traditionally do this.
+ * - Inverse EOTF (simply the inverse of the EOTF): is usually intended to go
+ * from an optical/linear space (which might have been used for blending)
+ * back to the encoded values. Gamma functions traditionally do this.
+ */
+static const char * const
+amdgpu_transfer_function_names[] = {
+ [AMDGPU_TRANSFER_FUNCTION_DEFAULT] = "Default",
+ [AMDGPU_TRANSFER_FUNCTION_IDENTITY] = "Identity",
+ [AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF] = "sRGB EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF] = "BT.709 inv_OETF",
+ [AMDGPU_TRANSFER_FUNCTION_PQ_EOTF] = "PQ EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF] = "Gamma 2.2 EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF] = "Gamma 2.4 EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF] = "Gamma 2.6 EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF] = "sRGB inv_EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_BT709_OETF] = "BT.709 OETF",
+ [AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF] = "PQ inv_EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF] = "Gamma 2.2 inv_EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF] = "Gamma 2.4 inv_EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF] = "Gamma 2.6 inv_EOTF",
+};
+
+static const u32 amdgpu_eotf =
+ BIT(AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_PQ_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF);
+
+static const u32 amdgpu_inv_eotf =
+ BIT(AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_BT709_OETF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF);
+
+static struct drm_property *
+amdgpu_create_tf_property(struct drm_device *dev,
+ const char *name,
+ u32 supported_tf)
+{
+ u32 transfer_functions = supported_tf |
+ BIT(AMDGPU_TRANSFER_FUNCTION_DEFAULT) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_IDENTITY);
+ struct drm_prop_enum_list enum_list[AMDGPU_TRANSFER_FUNCTION_COUNT];
+ int i, len;
+
+ len = 0;
+ for (i = 0; i < AMDGPU_TRANSFER_FUNCTION_COUNT; i++) {
+ if ((transfer_functions & BIT(i)) == 0)
+ continue;
+
+ enum_list[len].type = i;
+ enum_list[len].name = amdgpu_transfer_function_names[i];
+ len++;
+ }
+
+ return drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+ name, enum_list, len);
+}
+
+int
+amdgpu_dm_create_color_properties(struct amdgpu_device *adev)
+{
+ struct drm_property *prop;
+
+ prop = drm_property_create(adev_to_drm(adev),
+ DRM_MODE_PROP_BLOB,
+ "AMD_PLANE_DEGAMMA_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_degamma_lut_property = prop;
+
+ prop = drm_property_create_range(adev_to_drm(adev),
+ DRM_MODE_PROP_IMMUTABLE,
+ "AMD_PLANE_DEGAMMA_LUT_SIZE",
+ 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_degamma_lut_size_property = prop;
+
+ prop = amdgpu_create_tf_property(adev_to_drm(adev),
+ "AMD_PLANE_DEGAMMA_TF",
+ amdgpu_eotf);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_degamma_tf_property = prop;
+
+ prop = drm_property_create_range(adev_to_drm(adev),
+ 0, "AMD_PLANE_HDR_MULT", 0, U64_MAX);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_hdr_mult_property = prop;
+
+ prop = drm_property_create(adev_to_drm(adev),
+ DRM_MODE_PROP_BLOB,
+ "AMD_PLANE_CTM", 0);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_ctm_property = prop;
+
+ prop = drm_property_create(adev_to_drm(adev),
+ DRM_MODE_PROP_BLOB,
+ "AMD_PLANE_SHAPER_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_shaper_lut_property = prop;
+
+ prop = drm_property_create_range(adev_to_drm(adev),
+ DRM_MODE_PROP_IMMUTABLE,
+ "AMD_PLANE_SHAPER_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_shaper_lut_size_property = prop;
+
+ prop = amdgpu_create_tf_property(adev_to_drm(adev),
+ "AMD_PLANE_SHAPER_TF",
+ amdgpu_inv_eotf);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_shaper_tf_property = prop;
+
+ prop = drm_property_create(adev_to_drm(adev),
+ DRM_MODE_PROP_BLOB,
+ "AMD_PLANE_LUT3D", 0);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_lut3d_property = prop;
+
+ prop = drm_property_create_range(adev_to_drm(adev),
+ DRM_MODE_PROP_IMMUTABLE,
+ "AMD_PLANE_LUT3D_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_lut3d_size_property = prop;
+
+ prop = drm_property_create(adev_to_drm(adev),
+ DRM_MODE_PROP_BLOB,
+ "AMD_PLANE_BLEND_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_blend_lut_property = prop;
+
+ prop = drm_property_create_range(adev_to_drm(adev),
+ DRM_MODE_PROP_IMMUTABLE,
+ "AMD_PLANE_BLEND_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_blend_lut_size_property = prop;
+
+ prop = amdgpu_create_tf_property(adev_to_drm(adev),
+ "AMD_PLANE_BLEND_TF",
+ amdgpu_eotf);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_blend_tf_property = prop;
+
+ prop = amdgpu_create_tf_property(adev_to_drm(adev),
+ "AMD_CRTC_REGAMMA_TF",
+ amdgpu_inv_eotf);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.regamma_tf_property = prop;
+
+ return 0;
+}
+#endif
+
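To show how these properties are consumed, here is a hedged userspace-side sketch that programs a plane degamma TF and an HDR multiplier. The property names are the ones registered above (exposed only when the driver is built with AMD_PRIVATE_COLOR); the plane_prop_id() name-to-ID helper and the enum value resolution are placeholder assumptions.

    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Hypothetical helper: resolve a property ID by name on a plane. */
    uint32_t plane_prop_id(int fd, uint32_t plane_id, const char *name);

    int set_plane_hdr_props(int fd, drmModeAtomicReq *req, uint32_t plane_id,
    			uint64_t degamma_tf_enum_value)
    {
    	/* 203-nit SDR white: (203 << 32) / 80 in S31.32 sign-magnitude. */
    	const uint64_t hdr_mult = ((uint64_t)203 << 32) / 80;
    	int ret;

    	ret = drmModeAtomicAddProperty(req, plane_id,
    				       plane_prop_id(fd, plane_id, "AMD_PLANE_DEGAMMA_TF"),
    				       degamma_tf_enum_value);
    	if (ret < 0)
    		return ret;

    	ret = drmModeAtomicAddProperty(req, plane_id,
    				       plane_prop_id(fd, plane_id, "AMD_PLANE_HDR_MULT"),
    				       hdr_mult);
    	return ret < 0 ? ret : 0;
    }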
/**
* __extract_blob_lut - Extracts the DRM lut and lut size from a blob.
* @blob: DRM color mgmt property blob
@@ -182,7 +424,6 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm,
struct fixed31_32 *matrix)
{
- int64_t val;
int i;
/*
@@ -201,12 +442,29 @@ static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm,
}
/* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
- val = ctm->matrix[i - (i / 4)];
- /* If negative, convert to 2's complement. */
- if (val & (1ULL << 63))
- val = -(val & ~(1ULL << 63));
+ matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i - (i / 4)]);
+ }
+}
- matrix[i].value = val;
+/**
+ * __drm_ctm_3x4_to_dc_matrix - converts a DRM CTM 3x4 to a DC CSC float matrix
+ * @ctm: DRM color transformation matrix with 3x4 dimensions
+ * @matrix: DC CSC float matrix
+ *
+ * The matrix needs to be a 3x4 (12 entry) matrix.
+ */
+static void __drm_ctm_3x4_to_dc_matrix(const struct drm_color_ctm_3x4 *ctm,
+ struct fixed31_32 *matrix)
+{
+ int i;
+
+ /* The format provided is S31.32, using signed-magnitude representation.
+ * Our fixed31_32 is also S31.32, but uses 2's complement. We have
+ * to convert from signed-magnitude to 2's complement.
+ */
+ for (i = 0; i < 12; i++) {
+ /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
+ matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i]);
}
}
@@ -268,16 +526,18 @@ static int __set_output_tf(struct dc_transfer_func *func,
struct calculate_buffer cal_buffer = {0};
bool res;
- ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES);
-
cal_buffer.buffer_index = -1;
- gamma = dc_create_gamma();
- if (!gamma)
- return -ENOMEM;
+ if (lut_size) {
+ ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES);
- gamma->num_entries = lut_size;
- __drm_lut_to_dc_gamma(lut, gamma, false);
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->num_entries = lut_size;
+ __drm_lut_to_dc_gamma(lut, gamma, false);
+ }
if (func->tf == TRANSFER_FUNCTION_LINEAR) {
/*
@@ -285,27 +545,68 @@ static int __set_output_tf(struct dc_transfer_func *func,
* on top of a linear input. But degamma params can be used
* instead to simulate this.
*/
- gamma->type = GAMMA_CUSTOM;
+ if (gamma)
+ gamma->type = GAMMA_CUSTOM;
res = mod_color_calculate_degamma_params(NULL, func,
- gamma, true);
+ gamma, gamma != NULL);
} else {
/*
* Assume sRGB. The actual mapping will depend on whether the
* input was legacy or not.
*/
- gamma->type = GAMMA_CS_TFM_1D;
- res = mod_color_calculate_regamma_params(func, gamma, false,
+ if (gamma)
+ gamma->type = GAMMA_CS_TFM_1D;
+ res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL,
has_rom, NULL, &cal_buffer);
}
- dc_gamma_release(&gamma);
+ if (gamma)
+ dc_gamma_release(&gamma);
return res ? 0 : -ENOMEM;
}
+static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream,
+ const struct drm_color_lut *regamma_lut,
+ uint32_t regamma_size, bool has_rom,
+ enum dc_transfer_func_predefined tf)
+{
+ struct dc_transfer_func *out_tf = stream->out_transfer_func;
+ int ret = 0;
+
+ if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) {
+ /*
+ * CRTC RGM goes into RGM LUT.
+ *
+ * Note: there is no implicit sRGB regamma here. We are using
+ * degamma calculation from color module to calculate the curve
+ * from a linear base if gamma TF is not set. However, if gamma
+ * TF (!= Linear) and LUT are set at the same time, we will use
+ * regamma calculation, and the color module will combine the
+ * pre-defined TF and the custom LUT values into the LUT that's
+ * actually programmed.
+ */
+ out_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ out_tf->tf = tf;
+ out_tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+
+ ret = __set_output_tf(out_tf, regamma_lut, regamma_size, has_rom);
+ } else {
+ /*
+ * No CRTC RGM means we can just put the block into bypass
+ * since we don't have any plane level adjustments using it.
+ */
+ out_tf->type = TF_TYPE_BYPASS;
+ out_tf->tf = TRANSFER_FUNCTION_LINEAR;
+ }
+
+ return ret;
+}
+
/**
* __set_input_tf - calculates the input transfer function based on expected
* input space.
+ * @caps: dc color capabilities
* @func: transfer function
* @lut: lookup table that defines the color space
* @lut_size: size of respective lut.
@@ -313,27 +614,240 @@ static int __set_output_tf(struct dc_transfer_func *func,
* Returns:
* 0 in case of success. -ENOMEM if fails.
*/
-static int __set_input_tf(struct dc_transfer_func *func,
+static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *func,
const struct drm_color_lut *lut, uint32_t lut_size)
{
struct dc_gamma *gamma = NULL;
bool res;
- gamma = dc_create_gamma();
- if (!gamma)
- return -ENOMEM;
+ if (lut_size) {
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
- gamma->type = GAMMA_CUSTOM;
- gamma->num_entries = lut_size;
+ gamma->type = GAMMA_CUSTOM;
+ gamma->num_entries = lut_size;
+
+ __drm_lut_to_dc_gamma(lut, gamma, false);
+ }
- __drm_lut_to_dc_gamma(lut, gamma, false);
+ res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL);
- res = mod_color_calculate_degamma_params(NULL, func, gamma, true);
- dc_gamma_release(&gamma);
+ if (gamma)
+ dc_gamma_release(&gamma);
return res ? 0 : -ENOMEM;
}
+static enum dc_transfer_func_predefined
+amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
+{
+ switch (tf) {
+ default:
+ case AMDGPU_TRANSFER_FUNCTION_DEFAULT:
+ case AMDGPU_TRANSFER_FUNCTION_IDENTITY:
+ return TRANSFER_FUNCTION_LINEAR;
+ case AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF:
+ case AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF:
+ return TRANSFER_FUNCTION_SRGB;
+ case AMDGPU_TRANSFER_FUNCTION_BT709_OETF:
+ case AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF:
+ return TRANSFER_FUNCTION_BT709;
+ case AMDGPU_TRANSFER_FUNCTION_PQ_EOTF:
+ case AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF:
+ return TRANSFER_FUNCTION_PQ;
+ case AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF:
+ case AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF:
+ return TRANSFER_FUNCTION_GAMMA22;
+ case AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF:
+ case AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF:
+ return TRANSFER_FUNCTION_GAMMA24;
+ case AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF:
+ case AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF:
+ return TRANSFER_FUNCTION_GAMMA26;
+ }
+}
+
+static void __to_dc_lut3d_color(struct dc_rgb *rgb,
+ const struct drm_color_lut lut,
+ int bit_precision)
+{
+ rgb->red = drm_color_lut_extract(lut.red, bit_precision);
+ rgb->green = drm_color_lut_extract(lut.green, bit_precision);
+ rgb->blue = drm_color_lut_extract(lut.blue, bit_precision);
+}
+
+static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut,
+ uint32_t lut3d_size,
+ struct tetrahedral_params *params,
+ bool use_tetrahedral_9,
+ int bit_depth)
+{
+ struct dc_rgb *lut0;
+ struct dc_rgb *lut1;
+ struct dc_rgb *lut2;
+ struct dc_rgb *lut3;
+ int lut_i, i;
+
+ if (use_tetrahedral_9) {
+ lut0 = params->tetrahedral_9.lut0;
+ lut1 = params->tetrahedral_9.lut1;
+ lut2 = params->tetrahedral_9.lut2;
+ lut3 = params->tetrahedral_9.lut3;
+ } else {
+ lut0 = params->tetrahedral_17.lut0;
+ lut1 = params->tetrahedral_17.lut1;
+ lut2 = params->tetrahedral_17.lut2;
+ lut3 = params->tetrahedral_17.lut3;
+ }
+
+ for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) {
+ /*
+ * The 3D LUT RGB values are distributed along four arrays,
+ * lut0-3, where lut0 has 1229 entries and the others 1228.
+ * The bit depth supported for the 3D LUT channel is 12-bit,
+ * but DC also supports 10-bit.
+ *
+ * TODO: improve color pipeline API to enable the userspace set
+ * bit depth and 3D LUT size/stride, as specified by VA-API.
+ */
+ __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth);
+ __to_dc_lut3d_color(&lut1[lut_i], lut[i + 1], bit_depth);
+ __to_dc_lut3d_color(&lut2[lut_i], lut[i + 2], bit_depth);
+ __to_dc_lut3d_color(&lut3[lut_i], lut[i + 3], bit_depth);
+ }
+ /* lut0 has 1229 points (lut_size/4 + 1) */
+ __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth);
+}
+
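A worked check of the split performed above (illustrative only): the 17x17x17 LUT accepted by this path carries 17^3 = 4913 RGB triplets, dealt round-robin into the four arrays; lut0 also takes the trailing entry stored after the loop, hence 1229 versus 1228:

    /* Sketch: entry counts for the 17x17x17 distribution used above. */
    enum { LUT3D_DIM = 17, LUT3D_SIZE = LUT3D_DIM * LUT3D_DIM * LUT3D_DIM };

    _Static_assert(LUT3D_SIZE == 4913, "17^3 triplets in total");
    _Static_assert((LUT3D_SIZE + 3) / 4 == 1229, "lut0 gets the trailing entry");
    _Static_assert(LUT3D_SIZE / 4 == 1228, "lut1-3 get 1228 entries each");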
+/* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC plane
+ * @drm_lut3d: user 3D LUT
+ * @drm_lut3d_size: size of 3D LUT
+ * @lut: DC 3D LUT
+ *
+ * Map user 3D LUT data to the DC 3D LUT and set all necessary bits to
+ * program it on DCN accordingly.
+ */
+static void amdgpu_dm_atomic_lut3d(const struct drm_color_lut *drm_lut3d,
+ uint32_t drm_lut3d_size,
+ struct dc_3dlut *lut)
+{
+ if (!drm_lut3d_size) {
+ lut->state.bits.initialized = 0;
+ } else {
+ /* Stride and bit depth are not programmable by API yet.
+ * Therefore, only a 17x17x17 3D LUT (12-bit) is supported.
+ */
+ lut->lut_3d.use_tetrahedral_9 = false;
+ lut->lut_3d.use_12bits = true;
+ lut->state.bits.initialized = 1;
+ __drm_3dlut_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d,
+ lut->lut_3d.use_tetrahedral_9,
+ MAX_COLOR_3DLUT_BITDEPTH);
+ }
+}
+
+static int amdgpu_dm_atomic_shaper_lut(const struct drm_color_lut *shaper_lut,
+ bool has_rom,
+ enum dc_transfer_func_predefined tf,
+ uint32_t shaper_size,
+ struct dc_transfer_func *func_shaper)
+{
+ int ret = 0;
+
+ if (shaper_size || tf != TRANSFER_FUNCTION_LINEAR) {
+ /*
+ * If user shaper LUT is set, we assume a linear color space
+ * (linearized by degamma 1D LUT or not).
+ */
+ func_shaper->type = TF_TYPE_DISTRIBUTED_POINTS;
+ func_shaper->tf = tf;
+ func_shaper->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+
+ ret = __set_output_tf(func_shaper, shaper_lut, shaper_size, has_rom);
+ } else {
+ func_shaper->type = TF_TYPE_BYPASS;
+ func_shaper->tf = TRANSFER_FUNCTION_LINEAR;
+ }
+
+ return ret;
+}
+
+static int amdgpu_dm_atomic_blend_lut(const struct drm_color_lut *blend_lut,
+ bool has_rom,
+ enum dc_transfer_func_predefined tf,
+ uint32_t blend_size,
+ struct dc_transfer_func *func_blend)
+{
+ int ret = 0;
+
+ if (blend_size || tf != TRANSFER_FUNCTION_LINEAR) {
+ /*
+ * DRM plane gamma LUT or TF means we are linearizing color
+ * space before blending (similar to degamma programming). Since
+ * we don't have hardcoded curve support, we use the AMD color
+ * module to fill the parameters that will be translated to HW
+ * points.
+ */
+ func_blend->type = TF_TYPE_DISTRIBUTED_POINTS;
+ func_blend->tf = tf;
+ func_blend->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+
+ ret = __set_input_tf(NULL, func_blend, blend_lut, blend_size);
+ } else {
+ func_blend->type = TF_TYPE_BYPASS;
+ func_blend->tf = TRANSFER_FUNCTION_LINEAR;
+ }
+
+ return ret;
+}
+
+/**
+ * amdgpu_dm_verify_lut3d_size - verifies if 3D LUT is supported and if user
+ * shaper and 3D LUTs match the hw supported size
+ * @adev: amdgpu device
+ * @plane_state: the DRM plane state
+ *
+ * Verifies if pre-blending (DPP) 3D LUT is supported by the HW (DCN 2.0 or
+ * newer) and if the user shaper and 3D LUTs match the supported size.
+ *
+ * Returns:
+ * 0 on success. -EINVAL if the LUT sizes are invalid.
+ */
+int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev,
+ struct drm_plane_state *plane_state)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
+ const struct drm_color_lut *shaper = NULL, *lut3d = NULL;
+ uint32_t exp_size, size, dim_size = MAX_COLOR_3DLUT_SIZE;
+ bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut;
+
+ /* The shaper LUT is only available if the HW supports 3D LUT color caps */
+ exp_size = has_3dlut ? MAX_COLOR_LUT_ENTRIES : 0;
+ shaper = __extract_blob_lut(dm_plane_state->shaper_lut, &size);
+
+ if (shaper && size != exp_size) {
+ drm_dbg(&adev->ddev,
+ "Invalid Shaper LUT size. Should be %u but got %u.\n",
+ exp_size, size);
+ return -EINVAL;
+ }
+
+ /* The number of 3D LUT entries is the dimension size cubed */
+ exp_size = has_3dlut ? dim_size * dim_size * dim_size : 0;
+ lut3d = __extract_blob_lut(dm_plane_state->lut3d, &size);
+
+ if (lut3d && size != exp_size) {
+ drm_dbg(&adev->ddev,
+ "Invalid 3D LUT size. Should be %u but got %u.\n",
+ exp_size, size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* amdgpu_dm_verify_lut_sizes - verifies if DRM luts match the hw supported sizes
* @crtc_state: the DRM CRTC state
@@ -401,9 +915,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
const struct drm_color_lut *degamma_lut, *regamma_lut;
uint32_t degamma_size, regamma_size;
bool has_regamma, has_degamma;
+ enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_LINEAR;
bool is_legacy;
int r;
+ tf = amdgpu_tf_to_dc_tf(crtc->regamma_tf);
+
r = amdgpu_dm_verify_lut_sizes(&crtc->base);
if (r)
return r;
@@ -439,27 +956,23 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
crtc->cm_is_degamma_srgb = true;
stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
-
+ /*
+ * Note: although we pass has_rom as parameter here, we never
+ * actually use ROM because the color module only takes the ROM
+ * path if transfer_func->type == PREDEFINED.
+ *
+ * See more in mod_color_calculate_regamma_params()
+ */
r = __set_legacy_tf(stream->out_transfer_func, regamma_lut,
regamma_size, has_rom);
if (r)
return r;
- } else if (has_regamma) {
- /* If atomic regamma, CRTC RGM goes into RGM LUT. */
- stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
- stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
-
- r = __set_output_tf(stream->out_transfer_func, regamma_lut,
- regamma_size, has_rom);
+ } else {
+ regamma_size = has_regamma ? regamma_size : 0;
+ r = amdgpu_dm_set_atomic_regamma(stream, regamma_lut,
+ regamma_size, has_rom, tf);
if (r)
return r;
- } else {
- /*
- * No CRTC RGM means we can just put the block into bypass
- * since we don't have any plane level adjustments using it.
- */
- stream->out_transfer_func->type = TF_TYPE_BYPASS;
- stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
}
/*
@@ -495,20 +1008,10 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
return 0;
}
-/**
- * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane.
- * @crtc: amdgpu_dm crtc state
- * @dc_plane_state: target DC surface
- *
- * Update the underlying dc_stream_state's input transfer function (ITF) in
- * preparation for hardware commit. The transfer function used depends on
- * the preparation done on the stream for color management.
- *
- * Returns:
- * 0 on success. -ENOMEM if mem allocation fails.
- */
-int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
- struct dc_plane_state *dc_plane_state)
+static int
+map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc,
+ struct dc_plane_state *dc_plane_state,
+ struct dc_color_caps *caps)
{
const struct drm_color_lut *degamma_lut;
enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
@@ -531,8 +1034,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
&degamma_size);
ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);
- dc_plane_state->in_transfer_func->type =
- TF_TYPE_DISTRIBUTED_POINTS;
+ dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
/*
* This case isn't fully correct, but also fairly
@@ -564,11 +1066,11 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
dc_plane_state->in_transfer_func->tf =
TRANSFER_FUNCTION_LINEAR;
- r = __set_input_tf(dc_plane_state->in_transfer_func,
+ r = __set_input_tf(caps, dc_plane_state->in_transfer_func,
degamma_lut, degamma_size);
if (r)
return r;
- } else if (crtc->cm_is_degamma_srgb) {
+ } else {
/*
* For legacy gamma support we need the regamma input
* in linear space. Assume that the input is sRGB.
@@ -577,14 +1079,209 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
dc_plane_state->in_transfer_func->tf = tf;
if (tf != TRANSFER_FUNCTION_SRGB &&
- !mod_color_calculate_degamma_params(NULL,
- dc_plane_state->in_transfer_func, NULL, false))
+ !mod_color_calculate_degamma_params(caps,
+ dc_plane_state->in_transfer_func,
+ NULL, false))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+__set_dm_plane_degamma(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct dc_color_caps *color_caps)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
+ const struct drm_color_lut *degamma_lut;
+ enum amdgpu_transfer_function tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
+ uint32_t degamma_size;
+ bool has_degamma_lut;
+ int ret;
+
+ degamma_lut = __extract_blob_lut(dm_plane_state->degamma_lut,
+ &degamma_size);
+
+ has_degamma_lut = degamma_lut &&
+ !__is_lut_linear(degamma_lut, degamma_size);
+
+ tf = dm_plane_state->degamma_tf;
+
+ /* If we have neither a plane degamma LUT nor a TF to set on DC,
+ * there is nothing to do here, return.
+ */
+ if (!has_degamma_lut && tf == AMDGPU_TRANSFER_FUNCTION_DEFAULT)
+ return -EINVAL;
+
+ dc_plane_state->in_transfer_func->tf = amdgpu_tf_to_dc_tf(tf);
+
+ if (has_degamma_lut) {
+ ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);
+
+ dc_plane_state->in_transfer_func->type =
+ TF_TYPE_DISTRIBUTED_POINTS;
+
+ ret = __set_input_tf(color_caps, dc_plane_state->in_transfer_func,
+ degamma_lut, degamma_size);
+ if (ret)
+ return ret;
+ } else {
+ dc_plane_state->in_transfer_func->type =
+ TF_TYPE_PREDEFINED;
+
+ if (!mod_color_calculate_degamma_params(color_caps,
+ dc_plane_state->in_transfer_func, NULL, false))
return -ENOMEM;
- } else {
- /* ...Otherwise we can just bypass the DGM block. */
- dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
- dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+ }
+ return 0;
+}
+
+static int
+amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
+ enum amdgpu_transfer_function shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
+ enum amdgpu_transfer_function blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
+ const struct drm_color_lut *shaper_lut, *lut3d, *blend_lut;
+ uint32_t shaper_size, lut3d_size, blend_size;
+ int ret;
+
+ dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(dm_plane_state->hdr_mult);
+
+ shaper_lut = __extract_blob_lut(dm_plane_state->shaper_lut, &shaper_size);
+ shaper_size = shaper_lut != NULL ? shaper_size : 0;
+ shaper_tf = dm_plane_state->shaper_tf;
+ lut3d = __extract_blob_lut(dm_plane_state->lut3d, &lut3d_size);
+ lut3d_size = lut3d != NULL ? lut3d_size : 0;
+
+ amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, dc_plane_state->lut3d_func);
+ ret = amdgpu_dm_atomic_shaper_lut(shaper_lut, false,
+ amdgpu_tf_to_dc_tf(shaper_tf),
+ shaper_size,
+ dc_plane_state->in_shaper_func);
+ if (ret) {
+ drm_dbg_kms(plane_state->plane->dev,
+ "setting plane %d shaper LUT failed.\n",
+ plane_state->plane->index);
+
+ return ret;
+ }
+
+ blend_tf = dm_plane_state->blend_tf;
+ blend_lut = __extract_blob_lut(dm_plane_state->blend_lut, &blend_size);
+ blend_size = blend_lut != NULL ? blend_size : 0;
+
+ ret = amdgpu_dm_atomic_blend_lut(blend_lut, false,
+ amdgpu_tf_to_dc_tf(blend_tf),
+ blend_size, dc_plane_state->blend_tf);
+ if (ret) {
+ drm_dbg_kms(plane_state->plane->dev,
+ "setting plane %d gamma lut failed.\n",
+ plane_state->plane->index);
+
+ return ret;
}
return 0;
}
+
+/**
+ * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane.
+ * @crtc: amdgpu_dm crtc state
+ * @plane_state: DRM plane state
+ * @dc_plane_state: target DC surface
+ *
+ * Update the underlying dc_stream_state's input transfer function (ITF) in
+ * preparation for hardware commit. The transfer function used depends on
+ * the preparation done on the stream for color management.
+ *
+ * Returns:
+ * 0 on success. -ENOMEM if mem allocation fails.
+ */
+int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
+ struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state)
+{
+ struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev);
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
+ struct drm_color_ctm_3x4 *ctm = NULL;
+ struct dc_color_caps *color_caps = NULL;
+ bool has_crtc_cm_degamma;
+ int ret;
+
+ ret = amdgpu_dm_verify_lut3d_size(adev, plane_state);
+ if (ret) {
+ drm_dbg_driver(&adev->ddev, "amdgpu_dm_verify_lut3d_size() failed\n");
+ return ret;
+ }
+
+ if (dc_plane_state->ctx && dc_plane_state->ctx->dc)
+ color_caps = &dc_plane_state->ctx->dc->caps.color;
+
+ /* Initially, we can just bypass the DGM block. */
+ dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+ dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+
+ /* Afterwards, we update the values according to the color props */
+ has_crtc_cm_degamma = (crtc->cm_has_degamma || crtc->cm_is_degamma_srgb);
+
+ ret = __set_dm_plane_degamma(plane_state, dc_plane_state, color_caps);
+ if (ret == -ENOMEM)
+ return ret;
+
+ /* We only have one degamma block available (pre-blending) for the
+ * whole color correction pipeline, so we can't actually perform
+ * plane and CRTC degamma at the same time. Explicitly reject atomic
+ * updates when userspace sets both plane and CRTC degamma properties.
+ */
+ if (has_crtc_cm_degamma && ret != -EINVAL) {
+ drm_dbg_kms(crtc->base.crtc->dev,
+ "doesn't support plane and CRTC degamma at the same time\n");
+ return -EINVAL;
+ }
+
+	/* If we reach this point, no plane degamma was set; check whether
+	 * CRTC degamma is waiting to be mapped to the pre-blending degamma
+	 * block.
+	 */
+ if (has_crtc_cm_degamma) {
+		/*
+		 * AMD HW doesn't have post-blending degamma caps. When the DRM
+		 * CRTC atomic degamma is set, we map it to the DPP degamma block
+		 * (pre-blending) or, for legacy gamma, we use DPP degamma to
+		 * linearize (implicit degamma) from sRGB/BT709 according to
+		 * the input space.
+		 */
+ ret = map_crtc_degamma_to_dc_plane(crtc, dc_plane_state, color_caps);
+ if (ret)
+ return ret;
+ }
+
+	/* Set up plane CTM. */
+ if (dm_plane_state->ctm) {
+ ctm = (struct drm_color_ctm_3x4 *)dm_plane_state->ctm->data;
+ /*
+ * DCN2 and older don't support both pre-blending and
+ * post-blending gamut remap. For this HW family, if we have
+ * the plane and CRTC CTMs simultaneously, CRTC CTM takes
+ * priority, and we discard plane CTM, as implemented in
+ * dcn10_program_gamut_remap(). However, DCN3+ has DPP
+ * (pre-blending) and MPC (post-blending) `gamut remap` blocks;
+ * therefore, we can program plane and CRTC CTMs together by
+ * mapping CRTC CTM to MPC and keeping plane CTM setup at DPP,
+ * as it's done by dcn30_program_gamut_remap().
+ */
+ __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix);
+
+ dc_plane_state->gamut_remap_matrix.enable_remap = true;
+ dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
+ } else {
+ /* Bypass CTM. */
+ dc_plane_state->gamut_remap_matrix.enable_remap = false;
+ dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
+ }
+
+ return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state);
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 52ecfa746b54..f936a35fa9eb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -326,6 +326,9 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
if (!connector->state || connector->state->crtc != crtc)
continue;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconn = to_amdgpu_dm_connector(connector);
break;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index cb0b48bb2a7d..6e715ef3a556 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -29,7 +29,6 @@
#include "dc.h"
#include "amdgpu.h"
#include "amdgpu_dm_psr.h"
-#include "amdgpu_dm_replay.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_trace.h"
@@ -124,12 +123,7 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
* fill_dc_dirty_rects().
*/
if (vblank_work->stream && vblank_work->stream->link) {
- /*
- * Prioritize replay, instead of psr
- */
- if (vblank_work->stream->link->replay_settings.replay_feature_enabled)
- amdgpu_dm_replay_enable(vblank_work->stream, false);
- else if (vblank_work->enable) {
+ if (vblank_work->enable) {
if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
vblank_work->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(vblank_work->stream);
@@ -138,7 +132,6 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
#endif
- vblank_work->stream->link->panel_config.psr.disallow_replay &&
vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
amdgpu_dm_psr_enable(vblank_work->stream);
}
@@ -260,6 +253,7 @@ static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *cr
state->freesync_config = cur->freesync_config;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
+ state->regamma_tf = cur->regamma_tf;
state->crc_skip_count = cur->crc_skip_count;
state->mpo_requested = cur->mpo_requested;
	/* TODO: duplicate dc_stream once the stream object is flattened */
@@ -296,6 +290,70 @@ static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
}
#endif
+#ifdef AMD_PRIVATE_COLOR
+/**
+ * dm_crtc_additional_color_mgmt - enable additional color properties
+ * @crtc: DRM CRTC
+ *
+ * This function lets the driver attach the post-blending CRTC regamma
+ * transfer function property in addition to the DRM CRTC gamma LUT. The
+ * default value is a linear transfer function, which matches the CRTC
+ * gamma LUT behaviour without this property.
+ */
+static void
+dm_crtc_additional_color_mgmt(struct drm_crtc *crtc)
+{
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+
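+	/* Expose the regamma TF property only when the MPC post-blending
+	 * gamma RAM exists.
+	 */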
+ if (adev->dm.dc->caps.color.mpc.ogam_ram)
+ drm_object_attach_property(&crtc->base,
+ adev->mode_info.regamma_tf_property,
+ AMDGPU_TRANSFER_FUNCTION_DEFAULT);
+}
+
+static int
+amdgpu_dm_atomic_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(state);
+
+ if (property == adev->mode_info.regamma_tf_property) {
+ if (acrtc_state->regamma_tf != val) {
+ acrtc_state->regamma_tf = val;
+ acrtc_state->base.color_mgmt_changed |= 1;
+ }
+ } else {
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
+ crtc->base.id, crtc->name,
+ property->base.id, property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+amdgpu_dm_atomic_crtc_get_property(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(state);
+
+ if (property == adev->mode_info.regamma_tf_property)
+ *val = acrtc_state->regamma_tf;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+#endif
+
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.reset = amdgpu_dm_crtc_reset_state,
@@ -314,6 +372,10 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
#if defined(CONFIG_DEBUG_FS)
.late_register = amdgpu_dm_crtc_late_register,
#endif
+#ifdef AMD_PRIVATE_COLOR
+ .atomic_set_property = amdgpu_dm_atomic_crtc_set_property,
+ .atomic_get_property = amdgpu_dm_atomic_crtc_get_property,
+#endif
};
static void amdgpu_dm_crtc_helper_disable(struct drm_crtc *crtc)
@@ -489,6 +551,9 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
+#ifdef AMD_PRIVATE_COLOR
+ dm_crtc_additional_color_mgmt(&acrtc->base);
+#endif
return 0;
fail:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 98b41ec7288e..68a846323912 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -2976,7 +2976,6 @@ static int dmub_trace_mask_set(void *data, u64 val)
struct amdgpu_device *adev = data;
struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub;
enum dmub_gpint_command cmd;
- enum dmub_status status;
u64 mask = 0xffff;
u8 shift = 0;
u32 res;
@@ -3003,13 +3002,7 @@ static int dmub_trace_mask_set(void *data, u64 val)
break;
}
- status = dmub_srv_send_gpint_command(srv, cmd, res, 30);
-
- if (status == DMUB_STATUS_TIMEOUT)
- return -ETIMEDOUT;
- else if (status == DMUB_STATUS_INVALID)
- return -EINVAL;
- else if (status != DMUB_STATUS_OK)
+ if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, res, NULL, DM_DMUB_WAIT_TYPE_WAIT))
return -EIO;
usleep_range(100, 1000);
@@ -3026,7 +3019,6 @@ static int dmub_trace_mask_show(void *data, u64 *val)
enum dmub_gpint_command cmd = DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD0;
struct amdgpu_device *adev = data;
struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub;
- enum dmub_status status;
u8 shift = 0;
u64 raw = 0;
u64 res = 0;
@@ -3036,23 +3028,12 @@ static int dmub_trace_mask_show(void *data, u64 *val)
return -EINVAL;
while (i < 4) {
- status = dmub_srv_send_gpint_command(srv, cmd, 0, 30);
-
- if (status == DMUB_STATUS_OK) {
- status = dmub_srv_get_gpint_response(srv, (u32 *) &raw);
-
- if (status == DMUB_STATUS_INVALID)
- return -EINVAL;
- else if (status != DMUB_STATUS_OK)
- return -EIO;
- } else if (status == DMUB_STATUS_TIMEOUT) {
- return -ETIMEDOUT;
- } else if (status == DMUB_STATUS_INVALID) {
- return -EINVAL;
- } else {
+ uint32_t response;
+
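+		/* dc_wake_and_execute_gpint() wakes DMCUB as needed around the
+		 * GPINT request and returns false on any failure.
+		 */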
+ if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, 0, &response, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
return -EIO;
- }
+ raw = response;
usleep_range(100, 1000);
cmd++;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index b4696ec621c4..85b7f58a7f35 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -64,6 +64,12 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_fams = true;
break;
+ /* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
+ case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
+ case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
+ DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
+ edid_caps->panel_patch.remove_sink_ext_caps = true;
+ break;
default:
return;
}
@@ -973,6 +979,11 @@ int dm_helper_dmub_aux_transfer_sync(
struct aux_payload *payload,
enum aux_return_code_type *operation_result)
{
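+	/* Bail out early when HPD is deasserted: with no sink attached the
+	 * AUX transaction cannot succeed.
+	 */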
+ if (!link->hpd_status) {
+ *operation_result = AUX_RET_ERROR_HPD_DISCON;
+ return -1;
+ }
+
return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload,
operation_result);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 51467f132c26..58b880acb087 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -894,10 +894,15 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
- struct amdgpu_dm_connector *amdgpu_dm_connector =
- to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *amdgpu_dm_connector;
+ const struct dc_link *dc_link;
- const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+
+ dc_link = amdgpu_dm_connector->dc_link;
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
@@ -930,9 +935,14 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
- struct amdgpu_dm_connector *amdgpu_dm_connector =
- to_amdgpu_dm_connector(connector);
- const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
+ struct amdgpu_dm_connector *amdgpu_dm_connector;
+ const struct dc_link *dc_link;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ dc_link = amdgpu_dm_connector->dc_link;
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index b599efda3b19..941e96f100f4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -28,6 +28,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_edid.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
@@ -1500,14 +1501,16 @@ int pre_validate_dsc(struct drm_atomic_state *state,
int ind = find_crtc_index_in_state_by_stream(state, stream);
if (ind >= 0) {
+ struct drm_connector *connector;
struct amdgpu_dm_connector *aconnector;
struct drm_connector_state *drm_new_conn_state;
struct dm_connector_state *dm_new_conn_state;
struct dm_crtc_state *dm_old_crtc_state;
- aconnector =
+ connector =
amdgpu_dm_find_first_crtc_matching_connector(state,
state->crtcs[ind].ptr);
+ aconnector = to_amdgpu_dm_connector(connector);
drm_new_conn_state =
drm_atomic_get_new_connector_state(state,
&aconnector->base);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 116121e647ca..8a4c40b4c27e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -1337,8 +1337,14 @@ static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane)
amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
WARN_ON(amdgpu_state == NULL);
- if (amdgpu_state)
- __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
+ if (!amdgpu_state)
+ return;
+
+ __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
+ amdgpu_state->degamma_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
+ amdgpu_state->hdr_mult = AMDGPU_HDR_MULT_DEFAULT;
+ amdgpu_state->shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
+ amdgpu_state->blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
}
static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane)
@@ -1357,6 +1363,27 @@ static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct
dc_plane_state_retain(dm_plane_state->dc_state);
}
+ if (old_dm_plane_state->degamma_lut)
+ dm_plane_state->degamma_lut =
+ drm_property_blob_get(old_dm_plane_state->degamma_lut);
+ if (old_dm_plane_state->ctm)
+ dm_plane_state->ctm =
+ drm_property_blob_get(old_dm_plane_state->ctm);
+ if (old_dm_plane_state->shaper_lut)
+ dm_plane_state->shaper_lut =
+ drm_property_blob_get(old_dm_plane_state->shaper_lut);
+ if (old_dm_plane_state->lut3d)
+ dm_plane_state->lut3d =
+ drm_property_blob_get(old_dm_plane_state->lut3d);
+ if (old_dm_plane_state->blend_lut)
+ dm_plane_state->blend_lut =
+ drm_property_blob_get(old_dm_plane_state->blend_lut);
+
+ dm_plane_state->degamma_tf = old_dm_plane_state->degamma_tf;
+ dm_plane_state->hdr_mult = old_dm_plane_state->hdr_mult;
+ dm_plane_state->shaper_tf = old_dm_plane_state->shaper_tf;
+ dm_plane_state->blend_tf = old_dm_plane_state->blend_tf;
+
return &dm_plane_state->base;
}
@@ -1424,12 +1451,206 @@ static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane,
{
struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+ if (dm_plane_state->degamma_lut)
+ drm_property_blob_put(dm_plane_state->degamma_lut);
+ if (dm_plane_state->ctm)
+ drm_property_blob_put(dm_plane_state->ctm);
+ if (dm_plane_state->lut3d)
+ drm_property_blob_put(dm_plane_state->lut3d);
+ if (dm_plane_state->shaper_lut)
+ drm_property_blob_put(dm_plane_state->shaper_lut);
+ if (dm_plane_state->blend_lut)
+ drm_property_blob_put(dm_plane_state->blend_lut);
+
if (dm_plane_state->dc_state)
dc_plane_state_release(dm_plane_state->dc_state);
drm_atomic_helper_plane_destroy_state(plane, state);
}
+#ifdef AMD_PRIVATE_COLOR
+static void
+dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane)
+{
+ struct amdgpu_mode_info mode_info = dm->adev->mode_info;
+ struct dpp_color_caps dpp_color_caps = dm->dc->caps.color.dpp;
+
+ /* Check HW color pipeline capabilities on DPP block (pre-blending)
+ * before exposing related properties.
+ */
+ if (dpp_color_caps.dgam_ram || dpp_color_caps.gamma_corr) {
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_degamma_lut_property,
+ 0);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_degamma_lut_size_property,
+ MAX_COLOR_LUT_ENTRIES);
+ drm_object_attach_property(&plane->base,
+ dm->adev->mode_info.plane_degamma_tf_property,
+ AMDGPU_TRANSFER_FUNCTION_DEFAULT);
+ }
+ /* HDR MULT is always available */
+ drm_object_attach_property(&plane->base,
+ dm->adev->mode_info.plane_hdr_mult_property,
+ AMDGPU_HDR_MULT_DEFAULT);
+
+	/* Only enable plane CTM if both DPP and MPC gamut remap are available. */
+ if (dm->dc->caps.color.mpc.gamut_remap)
+ drm_object_attach_property(&plane->base,
+ dm->adev->mode_info.plane_ctm_property, 0);
+
+ if (dpp_color_caps.hw_3d_lut) {
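+		/* The HW 3D LUT is driven through a shaper LUT, so both
+		 * property sets are exposed together.
+		 */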
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_shaper_lut_property, 0);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_shaper_lut_size_property,
+ MAX_COLOR_LUT_ENTRIES);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_shaper_tf_property,
+ AMDGPU_TRANSFER_FUNCTION_DEFAULT);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_lut3d_property, 0);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_lut3d_size_property,
+ MAX_COLOR_3DLUT_SIZE);
+ }
+
+ if (dpp_color_caps.ogam_ram) {
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_blend_lut_property, 0);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_blend_lut_size_property,
+ MAX_COLOR_LUT_ENTRIES);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_blend_tf_property,
+ AMDGPU_TRANSFER_FUNCTION_DEFAULT);
+ }
+}
+
+static int
+dm_atomic_plane_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ bool replaced = false;
+ int ret;
+
+ if (property == adev->mode_info.plane_degamma_lut_property) {
+ ret = drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->degamma_lut,
+ val, -1,
+ sizeof(struct drm_color_lut),
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_degamma_tf_property) {
+ if (dm_plane_state->degamma_tf != val) {
+ dm_plane_state->degamma_tf = val;
+ dm_plane_state->base.color_mgmt_changed = 1;
+ }
+ } else if (property == adev->mode_info.plane_hdr_mult_property) {
+ if (dm_plane_state->hdr_mult != val) {
+ dm_plane_state->hdr_mult = val;
+ dm_plane_state->base.color_mgmt_changed = 1;
+ }
+ } else if (property == adev->mode_info.plane_ctm_property) {
+ ret = drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->ctm,
+ val,
+ sizeof(struct drm_color_ctm_3x4), -1,
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_shaper_lut_property) {
+ ret = drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->shaper_lut,
+ val, -1,
+ sizeof(struct drm_color_lut),
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_shaper_tf_property) {
+ if (dm_plane_state->shaper_tf != val) {
+ dm_plane_state->shaper_tf = val;
+ dm_plane_state->base.color_mgmt_changed = 1;
+ }
+ } else if (property == adev->mode_info.plane_lut3d_property) {
+ ret = drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->lut3d,
+ val, -1,
+ sizeof(struct drm_color_lut),
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_blend_lut_property) {
+ ret = drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->blend_lut,
+ val, -1,
+ sizeof(struct drm_color_lut),
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_blend_tf_property) {
+ if (dm_plane_state->blend_tf != val) {
+ dm_plane_state->blend_tf = val;
+ dm_plane_state->base.color_mgmt_changed = 1;
+ }
+ } else {
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
+ plane->base.id, plane->name,
+ property->base.id, property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dm_atomic_plane_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+
+ if (property == adev->mode_info.plane_degamma_lut_property) {
+ *val = (dm_plane_state->degamma_lut) ?
+ dm_plane_state->degamma_lut->base.id : 0;
+ } else if (property == adev->mode_info.plane_degamma_tf_property) {
+ *val = dm_plane_state->degamma_tf;
+ } else if (property == adev->mode_info.plane_hdr_mult_property) {
+ *val = dm_plane_state->hdr_mult;
+ } else if (property == adev->mode_info.plane_ctm_property) {
+ *val = (dm_plane_state->ctm) ?
+ dm_plane_state->ctm->base.id : 0;
+ } else if (property == adev->mode_info.plane_shaper_lut_property) {
+ *val = (dm_plane_state->shaper_lut) ?
+ dm_plane_state->shaper_lut->base.id : 0;
+ } else if (property == adev->mode_info.plane_shaper_tf_property) {
+ *val = dm_plane_state->shaper_tf;
+ } else if (property == adev->mode_info.plane_lut3d_property) {
+ *val = (dm_plane_state->lut3d) ?
+ dm_plane_state->lut3d->base.id : 0;
+ } else if (property == adev->mode_info.plane_blend_lut_property) {
+ *val = (dm_plane_state->blend_lut) ?
+ dm_plane_state->blend_lut->base.id : 0;
+ } else if (property == adev->mode_info.plane_blend_tf_property) {
+		*val = dm_plane_state->blend_tf;
+	} else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
static const struct drm_plane_funcs dm_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -1438,6 +1659,10 @@ static const struct drm_plane_funcs dm_plane_funcs = {
.atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state,
.atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state,
.format_mod_supported = amdgpu_dm_plane_format_mod_supported,
+#ifdef AMD_PRIVATE_COLOR
+ .atomic_set_property = dm_atomic_plane_set_property,
+ .atomic_get_property = dm_atomic_plane_get_property,
+#endif
};
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
@@ -1517,6 +1742,9 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_helper_add(plane, &dm_plane_helper_funcs);
+#ifdef AMD_PRIVATE_COLOR
+ dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
+#endif
/* Create (reset) the plane state */
if (plane->funcs->reset)
plane->funcs->reset(plane);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 08ce3bb8f640..1f08c6564c3b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -51,6 +51,9 @@ static bool link_supports_psrsu(struct dc_link *link)
!link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)
return false;
+ if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
+ return false;
+
return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
new file mode 100644
index 000000000000..16e72d623630
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services_types.h"
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_wb.h"
+#include "amdgpu_display.h"
+#include "dc.h"
+
+#include <drm/drm_edid.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
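+/* Pixel formats the writeback connector is exposed with; only 10-bit XRGB
+ * is advertised here.
+ */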
+static const u32 amdgpu_dm_wb_formats[] = {
+ DRM_FORMAT_XRGB2101010,
+};
+
+static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_framebuffer *fb;
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ bool found = false;
+ uint8_t i;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+ if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
+ DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
+ fb->width, fb->height);
+ return -EINVAL;
+ }
+
+	for (i = 0; i < ARRAY_SIZE(amdgpu_dm_wb_formats); i++) {
+ if (fb->format->format == amdgpu_dm_wb_formats[i])
+ found = true;
+ }
+
+ if (!found) {
+ DRM_DEBUG_KMS("Invalid pixel format %p4cc\n",
+ &fb->format->format);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+ dev->mode_config.max_height);
+}
+
+static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,
+ struct drm_writeback_job *job)
+{
+ struct amdgpu_framebuffer *afb;
+ struct drm_gem_object *obj;
+ struct amdgpu_device *adev;
+ struct amdgpu_bo *rbo;
+ uint32_t domain;
+ int r;
+
+ if (!job->fb) {
+ DRM_DEBUG_KMS("No FB bound\n");
+ return 0;
+ }
+
+ afb = to_amdgpu_framebuffer(job->fb);
+ obj = job->fb->obj[0];
+ rbo = gem_to_amdgpu_bo(obj);
+ adev = amdgpu_ttm_adev(rbo->tbo.bdev);
+
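+	/* Reserve, pin and GART-map the FB so it keeps a stable GPU address
+	 * for the duration of the writeback job.
+	 */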
+ r = amdgpu_bo_reserve(rbo, true);
+ if (r) {
+ dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+ return r;
+ }
+
+ r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
+ if (r) {
+ dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ goto error_unlock;
+ }
+
+ domain = amdgpu_display_supported_domains(adev, rbo->flags);
+
+ r = amdgpu_bo_pin(rbo, domain);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+ goto error_unlock;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&rbo->tbo);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("%p bind failed\n", rbo);
+ goto error_unpin;
+ }
+
+ amdgpu_bo_unreserve(rbo);
+
+ afb->address = amdgpu_bo_gpu_offset(rbo);
+
+ amdgpu_bo_ref(rbo);
+
+ return 0;
+
+error_unpin:
+ amdgpu_bo_unpin(rbo);
+
+error_unlock:
+ amdgpu_bo_unreserve(rbo);
+ return r;
+}
+
+static void amdgpu_dm_wb_cleanup_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct amdgpu_bo *rbo;
+ int r;
+
+ if (!job->fb)
+ return;
+
+ rbo = gem_to_amdgpu_bo(job->fb->obj[0]);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r)) {
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ return;
+ }
+
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unref(&rbo);
+}
+
+static const struct drm_encoder_helper_funcs amdgpu_dm_wb_encoder_helper_funcs = {
+ .atomic_check = amdgpu_dm_wb_encoder_atomic_check,
+};
+
+static const struct drm_connector_funcs amdgpu_dm_wb_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs amdgpu_dm_wb_conn_helper_funcs = {
+ .get_modes = amdgpu_dm_wb_connector_get_modes,
+ .prepare_writeback_job = amdgpu_dm_wb_prepare_job,
+ .cleanup_writeback_job = amdgpu_dm_wb_cleanup_job,
+};
+
+int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_wb_connector *wbcon,
+ uint32_t link_index)
+{
+ struct dc *dc = dm->dc;
+ struct dc_link *link = dc_get_link_at_index(dc, link_index);
+ int res = 0;
+
+ wbcon->link = link;
+
+ drm_connector_helper_add(&wbcon->base.base, &amdgpu_dm_wb_conn_helper_funcs);
+
+ res = drm_writeback_connector_init(&dm->adev->ddev, &wbcon->base,
+ &amdgpu_dm_wb_connector_funcs,
+ &amdgpu_dm_wb_encoder_helper_funcs,
+ amdgpu_dm_wb_formats,
+ ARRAY_SIZE(amdgpu_dm_wb_formats),
+ amdgpu_dm_get_encoder_crtc_mask(dm->adev));
+
+ if (res)
+ return res;
+	/*
+	 * Some connector properties require access to state, like bpc, so
+	 * allocate default initial connector state with our reset helper.
+	 */
+ if (wbcon->base.base.funcs->reset)
+ wbcon->base.base.funcs->reset(&wbcon->base.base);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h
new file mode 100644
index 000000000000..13d31c857dee
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_WB_H__
+#define __AMDGPU_DM_WB_H__
+
+#include <drm/drm_writeback.h>
+
+int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_wb_connector *dm_wbcon,
+ uint32_t link_index);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 390e7a99be54..7991ae468f75 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -34,8 +34,6 @@ DC_LIBS += dcn21
DC_LIBS += dcn201
DC_LIBS += dcn30
DC_LIBS += dcn301
-DC_LIBS += dcn302
-DC_LIBS += dcn303
DC_LIBS += dcn31
DC_LIBS += dcn314
DC_LIBS += dcn32
@@ -62,7 +60,7 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI
include $(AMD_DC)
DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o
+dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o
DISPLAY_CORE += dc_vm_helper.o
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index e295a839ab47..1090d235086a 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -103,7 +103,8 @@ void convert_float_matrix(
static uint32_t find_gcd(uint32_t a, uint32_t b)
{
- uint32_t remainder = 0;
+ uint32_t remainder;
+
while (b != 0) {
remainder = a % b;
a = b;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 7cdb1a8a0ba0..960c4b4f6ddf 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1014,13 +1014,20 @@ static enum bp_result get_ss_info_v4_5(
DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_DISPLAY_PORT:
- ss_info->spread_spectrum_percentage =
+ if (bp->base.integrated_info) {
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", bp->base.integrated_info->gpuclk_ss_percentage);
+ ss_info->spread_spectrum_percentage =
+ bp->base.integrated_info->gpuclk_ss_percentage;
+ ss_info->type.CENTER_MODE =
+ bp->base.integrated_info->gpuclk_ss_type;
+ } else {
+ ss_info->spread_spectrum_percentage =
disp_cntl_tbl->dp_ss_percentage;
- ss_info->spread_spectrum_range =
+ ss_info->spread_spectrum_range =
disp_cntl_tbl->dp_ss_rate_10hz * 10;
- if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
- ss_info->type.CENTER_MODE = true;
-
+ if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ }
DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
@@ -1691,7 +1698,7 @@ static enum bp_result bios_parser_enable_disp_power_gating(
static enum bp_result bios_parser_enable_lvtma_control(
struct dc_bios *dcb,
uint8_t uc_pwr_on,
- uint8_t panel_instance,
+ uint8_t pwrseq_instance,
uint8_t bypass_panel_control_wait)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
@@ -1699,7 +1706,7 @@ static enum bp_result bios_parser_enable_lvtma_control(
if (!bp->cmd_tbl.enable_lvtma_control)
return BP_RESULT_FAILURE;
- return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance, bypass_panel_control_wait);
+ return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, pwrseq_instance, bypass_panel_control_wait);
}
static bool bios_parser_is_accelerated_mode(
@@ -2214,22 +2221,22 @@ static enum bp_result bios_parser_get_disp_connector_caps_info(
switch (bp->object_info_tbl.revision.minor) {
case 4:
- default:
- object = get_bios_object(bp, object_id);
-
- if (!object)
- return BP_RESULT_BADINPUT;
-
- record = get_disp_connector_caps_record(bp, object);
- if (!record)
- return BP_RESULT_NORECORD;
-
- info->INTERNAL_DISPLAY =
- (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0;
- info->INTERNAL_DISPLAY_BL =
- (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 1 : 0;
- break;
- case 5:
+ default:
+ object = get_bios_object(bp, object_id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_disp_connector_caps_record(bp, object);
+ if (!record)
+ return BP_RESULT_NORECORD;
+
+ info->INTERNAL_DISPLAY =
+ (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0;
+ info->INTERNAL_DISPLAY_BL =
+ (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 1 : 0;
+ break;
+ case 5:
object_path_v3 = get_bios_object_from_path_v3(bp, object_id);
if (!object_path_v3)
@@ -2391,7 +2398,6 @@ static enum bp_result get_vram_info_v30(
return result;
}
-
/*
* get_integrated_info_v11
*
@@ -2814,6 +2820,8 @@ static enum bp_result get_integrated_info_v2_2(
info->ma_channel_number = info_v2_2->umachannelnumber;
info->dp_ss_control =
le16_to_cpu(info_v2_2->reserved1);
+ info->gpuclk_ss_percentage = info_v2_2->gpuclk_ss_percentage;
+ info->gpuclk_ss_type = info_v2_2->gpuclk_ss_type;
for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
info->ext_disp_conn_info.gu_id[i] =
@@ -3323,27 +3331,28 @@ static enum bp_result get_bracket_layout_record(
DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
return BP_RESULT_BADINPUT;
}
+
tbl = &bp->object_info_tbl;
v1_4 = tbl->v1_4;
v1_5 = tbl->v1_5;
result = BP_RESULT_NORECORD;
switch (bp->object_info_tbl.revision.minor) {
- case 4:
- default:
- for (i = 0; i < v1_4->number_of_path; ++i) {
- if (bracket_layout_id ==
- v1_4->display_path[i].display_objid) {
- result = update_slot_layout_info(dcb, i, slot_layout_info);
- break;
- }
+ case 4:
+ default:
+ for (i = 0; i < v1_4->number_of_path; ++i) {
+ if (bracket_layout_id == v1_4->display_path[i].display_objid) {
+ result = update_slot_layout_info(dcb, i, slot_layout_info);
+ break;
}
- break;
- case 5:
- for (i = 0; i < v1_5->number_of_path; ++i)
- result = update_slot_layout_info_v2(dcb, i, slot_layout_info);
- break;
+ }
+ break;
+ case 5:
+ for (i = 0; i < v1_5->number_of_path; ++i)
+ result = update_slot_layout_info_v2(dcb, i, slot_layout_info);
+ break;
}
+
return result;
}
@@ -3352,9 +3361,7 @@ static enum bp_result bios_get_board_layout_info(
struct board_layout_info *board_layout_info)
{
unsigned int i;
-
struct bios_parser *bp;
-
static enum bp_result record_result;
unsigned int max_slots;
@@ -3364,7 +3371,6 @@ static enum bp_result bios_get_board_layout_info(
0, 0
};
-
bp = BP_FROM_DCB(dcb);
if (board_layout_info == NULL) {
@@ -3545,7 +3551,6 @@ static const struct dc_vbios_funcs vbios_funcs = {
.bios_parser_destroy = firmware_parser_destroy,
.get_board_layout_info = bios_get_board_layout_info,
- /* TODO: use this fn in hw init?*/
.pack_data_tables = bios_parser_pack_data_tables,
.get_atom_dc_golden_table = bios_get_atom_dc_golden_table,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 90a02d7bd3da..293a919d605d 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -123,7 +123,7 @@ static void encoder_control_dmcub(
sizeof(cmd.digx_encoder_control.header);
cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig;
- dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result encoder_control_digx_v1_5(
@@ -259,7 +259,7 @@ static void transmitter_control_dmcub(
sizeof(cmd.dig1_transmitter_control.header);
cmd.dig1_transmitter_control.transmitter_control.dig = *dig;
- dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result transmitter_control_v1_6(
@@ -321,7 +321,7 @@ static void transmitter_control_dmcub_v1_7(
sizeof(cmd.dig1_transmitter_control.header);
cmd.dig1_transmitter_control.transmitter_control.dig_v1_7 = *dig;
- dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result transmitter_control_v1_7(
@@ -429,7 +429,7 @@ static void set_pixel_clock_dmcub(
sizeof(cmd.set_pixel_clock.header);
cmd.set_pixel_clock.pixel_clock.clk = *clk;
- dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result set_pixel_clock_v7(
@@ -796,7 +796,7 @@ static void enable_disp_power_gating_dmcub(
sizeof(cmd.enable_disp_power_gating.header);
cmd.enable_disp_power_gating.power_gating.pwr = *pwr;
- dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result enable_disp_power_gating_v2_1(
@@ -976,7 +976,7 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
static enum bp_result enable_lvtma_control(
struct bios_parser *bp,
uint8_t uc_pwr_on,
- uint8_t panel_instance,
+ uint8_t pwrseq_instance,
uint8_t bypass_panel_control_wait);
static void init_enable_lvtma_control(struct bios_parser *bp)
@@ -989,7 +989,7 @@ static void init_enable_lvtma_control(struct bios_parser *bp)
static void enable_lvtma_control_dmcub(
struct dc_dmub_srv *dmcub,
uint8_t uc_pwr_on,
- uint8_t panel_instance,
+ uint8_t pwrseq_instance,
uint8_t bypass_panel_control_wait)
{
@@ -1002,17 +1002,17 @@ static void enable_lvtma_control_dmcub(
DMUB_CMD__VBIOS_LVTMA_CONTROL;
cmd.lvtma_control.data.uc_pwr_action =
uc_pwr_on;
- cmd.lvtma_control.data.panel_inst =
- panel_instance;
+ cmd.lvtma_control.data.pwrseq_inst =
+ pwrseq_instance;
cmd.lvtma_control.data.bypass_panel_control_wait =
bypass_panel_control_wait;
- dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result enable_lvtma_control(
struct bios_parser *bp,
uint8_t uc_pwr_on,
- uint8_t panel_instance,
+ uint8_t pwrseq_instance,
uint8_t bypass_panel_control_wait)
{
enum bp_result result = BP_RESULT_FAILURE;
@@ -1021,7 +1021,7 @@ static enum bp_result enable_lvtma_control(
bp->base.ctx->dc->debug.dmub_command_table) {
enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv,
uc_pwr_on,
- panel_instance,
+ pwrseq_instance,
bypass_panel_control_wait);
return BP_RESULT_OK;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
index b6d09bf6cf72..41c8c014397f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
@@ -96,7 +96,7 @@ struct cmd_tbl {
struct bios_parser *bp, uint8_t id);
enum bp_result (*enable_lvtma_control)(struct bios_parser *bp,
uint8_t uc_pwr_on,
- uint8_t panel_instance,
+ uint8_t pwrseq_instance,
uint8_t bypass_panel_control_wait);
};
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 3e73c4e59d40..28a2a837d2f0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -29,6 +29,7 @@
#include "dc_types.h"
#include "dccg.h"
#include "clk_mgr_internal.h"
+#include "dc_state_priv.h"
#include "link.h"
#include "dce100/dce_clk_mgr.h"
@@ -63,7 +64,7 @@ int clk_mgr_helper_get_active_display_cnt(
/* Don't count SubVP phantom pipes as part of active
* display count
*/
- if (stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM)
continue;
/*
@@ -368,7 +369,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
}
break;
-#endif /* CONFIG_DRM_AMD_DC_FP - Family RV */
+#endif /* CONFIG_DRM_AMD_DC_FP */
default:
ASSERT(0); /* Unknown Asic */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 3db4ef564b99..ce1386e22576 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -253,7 +253,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 7326b7565846..a84f1e376dee 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -87,6 +87,20 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+#define regCLK1_CLK2_BYPASS_CNTL 0x029c
+#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+
+#define regCLK6_0_CLK6_spll_field_8 0x464b
+#define regCLK6_0_CLK6_spll_field_8_BASE_IDX 0
+
+#define CLK6_0_CLK6_spll_field_8__spll_ssc_en__SHIFT 0xd
+#define CLK6_0_CLK6_spll_field_8__spll_ssc_en_MASK 0x00002000L
+
#define REG(reg_name) \
(CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
@@ -131,35 +145,63 @@ static int dcn314_get_active_display_cnt_wa(
return display_count;
}
-static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+ bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe = safe_to_lower
+ ? &context->res_ctx.pipe_ctx[i]
+ : &dc->current_state->res_ctx.pipe_ctx[i];
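+		/* When it is safe to lower, the new context describes the
+		 * pipes that remain active; until then, the current state
+		 * still reflects the hardware.
+		 */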
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
- struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
-
if (disable) {
- if (stream_enc && stream_enc->funcs->disable_fifo)
- pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
} else {
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
-
- if (stream_enc && stream_enc->funcs->enable_fifo)
- pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
}
}
}
}
+bool dcn314_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ uint32_t ssc_enable;
+
+ REG_GET(CLK6_0_CLK6_spll_field_8, spll_ssc_en, &ssc_enable);
+
+ return ssc_enable == 1;
+}
+
+void dcn314_init_clocks(struct clk_mgr *clk_mgr)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+
+	// Adjust the dp_dto reference clock if SSC is enabled; otherwise use dprefclk as-is
+ if (dcn314_is_spll_ssc_enabled(clk_mgr))
+ clk_mgr->dp_dto_source_clock_in_khz =
+ dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
+ else
+ clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+}
+
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
@@ -252,11 +294,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn314_disable_otg_wa(clk_mgr_base, context, true);
+ dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn314_disable_otg_wa(clk_mgr_base, context, false);
+ dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
}
@@ -284,7 +326,7 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
@@ -436,6 +478,11 @@ static DpmClocks314_t dummy_clocks;
static struct dcn314_watermarks dummy_wms = { 0 };
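+/* Spread-spectrum percentage per clock source, in units of ss_divider:
+ * 375 / 1000 => 0.375% spread.
+ */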
+static struct dcn314_ss_info_table ss_info_table = {
+ .ss_divider = 1000,
+ .ss_percentage = {0, 0, 375, 375, 375}
+};
+
static void dcn314_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn314_watermarks *table)
{
int i, num_valid_sets;
@@ -708,13 +755,31 @@ static struct clk_mgr_funcs dcn314_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn314_update_clocks,
- .init_clocks = dcn31_init_clocks,
+ .init_clocks = dcn314_init_clocks,
.enable_pme_wa = dcn314_enable_pme_wa,
.are_clock_states_equal = dcn314_are_clock_states_equal,
.notify_wm_ranges = dcn314_notify_wm_ranges
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
+static void dcn314_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t clock_source;
+
+	REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+
+ if (dcn314_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+ clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+ if (clk_mgr->dprefclk_ss_percentage != 0) {
+ clk_mgr->ss_on_dprefclk = true;
+ clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+ }
+ }
+}
+
void dcn314_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn314 *clk_mgr,
@@ -782,6 +847,7 @@ void dcn314_clk_mgr_construct(
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
dce_clock_read_ss_info(&clk_mgr->base);
+ dcn314_read_ss_info_from_lut(&clk_mgr->base);
/*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/
clk_mgr->base.base.bw_params = &dcn314_bw_params;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index 171f84340eb2..002c28e80720 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -28,6 +28,8 @@
#define __DCN314_CLK_MGR_H__
#include "clk_mgr_internal.h"
+#define DCN314_NUM_CLOCK_SOURCES 5
+
struct dcn314_watermarks;
struct dcn314_smu_watermark_set {
@@ -40,9 +42,18 @@ struct clk_mgr_dcn314 {
struct dcn314_smu_watermark_set smu_wm_set;
};
+struct dcn314_ss_info_table {
+ uint32_t ss_divider;
+ uint32_t ss_percentage[DCN314_NUM_CLOCK_SOURCES];
+};
+
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b);
+bool dcn314_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base);
+
+void dcn314_init_clocks(struct clk_mgr *clk_mgr);
+
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index 8776055bbeaa..644da4637320 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -232,7 +232,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 09151cc56ce4..12f3e8aa46d8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -239,7 +239,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index a496930b1f9c..aadd07bc68c5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -25,7 +25,6 @@
#include "dccg.h"
#include "clk_mgr_internal.h"
-
#include "dcn32/dcn32_clk_mgr_smu_msg.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
@@ -34,7 +33,7 @@
#include "core_types.h"
#include "dm_helpers.h"
#include "link.h"
-
+#include "dc_state_priv.h"
#include "atomfirmware.h"
#include "smu13_driver_if.h"
@@ -458,20 +457,56 @@ static int dcn32_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
return 0;
}
-static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr_internal *clk_mgr)
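+/* True when the plane is unscaled: source rect, destination rect and stream
+ * timing all share the same width and height.
+ */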
+static bool dcn32_check_native_scaling(struct pipe_ctx *pipe)
{
- unsigned int dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
- unsigned int dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK
- unsigned int dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK
- unsigned int dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK
- unsigned int dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK
- unsigned int fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK
+ bool is_native_scaling = false;
+ int width = pipe->plane_state->src_rect.width;
+ int height = pipe->plane_state->src_rect.height;
+
+ if (pipe->stream->timing.h_addressable == width &&
+ pipe->stream->timing.v_addressable == height &&
+ pipe->plane_state->dst_rect.width == width &&
+ pipe->plane_state->dst_rect.height == height)
+ is_native_scaling = true;
+
+ return is_native_scaling;
+}
+
+static void dcn32_auto_dpm_test_log(
+ struct dc_clocks *new_clocks,
+ struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context)
+{
+ unsigned int dispclk_khz_reg, dppclk_khz_reg, dprefclk_khz_reg, dcfclk_khz_reg, dtbclk_khz_reg,
+ fclk_khz_reg, mall_ss_size_bytes;
+ int dramclk_khz_override, fclk_khz_override, num_fclk_levels;
+
+ struct pipe_ctx *pipe_ctx_list[MAX_PIPES];
+ int active_pipe_count = 0;
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
+ pipe_ctx_list[active_pipe_count] = pipe_ctx;
+ active_pipe_count++;
+ }
+ }
+
+ mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes;
+
+ dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
+ dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK
+ dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK
+ dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK
+ dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK
+ fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK
// Overrides for these clocks in case there is no p_state change support
- int dramclk_khz_override = new_clocks->dramclk_khz;
- int fclk_khz_override = new_clocks->fclk_khz;
+ dramclk_khz_override = new_clocks->dramclk_khz;
+ fclk_khz_override = new_clocks->fclk_khz;
- int num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1;
+ num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1;
if (!new_clocks->p_state_change_support) {
dramclk_khz_override = clk_mgr->base.bw_params->max_memclk_mhz * 1000;
@@ -488,16 +523,49 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr
//
// AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n"
////////////////////////////////////////////////////////////////////////////
- if (new_clocks &&
+ if (new_clocks && active_pipe_count > 0 &&
new_clocks->dramclk_khz > 0 &&
new_clocks->fclk_khz > 0 &&
new_clocks->dcfclk_khz > 0 &&
new_clocks->dppclk_khz > 0) {
+ uint32_t pix_clk_list[MAX_PIPES] = {0};
+ int p_state_list[MAX_PIPES] = {0};
+ int disp_src_width_list[MAX_PIPES] = {0};
+ int disp_src_height_list[MAX_PIPES] = {0};
+ uint64_t disp_src_refresh_list[MAX_PIPES] = {0};
+ bool is_scaled_list[MAX_PIPES] = {0};
+
+ for (int i = 0; i < active_pipe_count; i++) {
+ struct pipe_ctx *curr_pipe_ctx = pipe_ctx_list[i];
+ uint64_t refresh_rate;
+
+ pix_clk_list[i] = curr_pipe_ctx->stream->timing.pix_clk_100hz;
+ p_state_list[i] = curr_pipe_ctx->p_state_type;
+
+ refresh_rate = (curr_pipe_ctx->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ curr_pipe_ctx->stream->timing.v_total * curr_pipe_ctx->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.h_total);
+ disp_src_refresh_list[i] = refresh_rate;
+
+ if (curr_pipe_ctx->plane_state) {
+ is_scaled_list[i] = !(dcn32_check_native_scaling(curr_pipe_ctx));
+ disp_src_width_list[i] = curr_pipe_ctx->plane_state->src_rect.width;
+ disp_src_height_list[i] = curr_pipe_ctx->plane_state->src_rect.height;
+ }
+ }
+
DC_LOG_AUTO_DPM_TEST("AutoDPMTest: dramclk:%d - fclk:%d - "
"dcfclk:%d - dppclk:%d - dispclk_hw:%d - "
"dppclk_hw:%d - dprefclk_hw:%d - dcfclk_hw:%d - "
- "dtbclk_hw:%d - fclk_hw:%d\n",
+ "dtbclk_hw:%d - fclk_hw:%d - pix_clk_0:%d - pix_clk_1:%d - "
+ "pix_clk_2:%d - pix_clk_3:%d - mall_ss_size:%d - p_state_type_0:%d - "
+ "p_state_type_1:%d - p_state_type_2:%d - p_state_type_3:%d - "
+ "pix_width_0:%d - pix_height_0:%d - refresh_rate_0:%lld - is_scaled_0:%d - "
+ "pix_width_1:%d - pix_height_1:%d - refresh_rate_1:%lld - is_scaled_1:%d - "
+ "pix_width_2:%d - pix_height_2:%d - refresh_rate_2:%lld - is_scaled_2:%d - "
+ "pix_width_3:%d - pix_height_3:%d - refresh_rate_3:%lld - is_scaled_3:%d - LOG_END\n",
dramclk_khz_override,
fclk_khz_override,
new_clocks->dcfclk_khz,
@@ -507,7 +575,14 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr
dprefclk_khz_reg,
dcfclk_khz_reg,
dtbclk_khz_reg,
- fclk_khz_reg);
+ fclk_khz_reg,
+ pix_clk_list[0], pix_clk_list[1], pix_clk_list[2], pix_clk_list[3],
+ mall_ss_size_bytes,
+ p_state_list[0], p_state_list[1], p_state_list[2], p_state_list[3],
+ disp_src_width_list[0], disp_src_height_list[0], disp_src_refresh_list[0], is_scaled_list[0],
+ disp_src_width_list[1], disp_src_height_list[1], disp_src_refresh_list[1], is_scaled_list[1],
+ disp_src_width_list[2], disp_src_height_list[2], disp_src_refresh_list[2], is_scaled_list[2],
+ disp_src_width_list[3], disp_src_height_list[3], disp_src_refresh_list[3], is_scaled_list[3]);
}
}
@@ -680,6 +755,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
/* DCCG requires KHz precision for DTBCLK */
clk_mgr_base->clks.ref_dtbclk_khz =
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz));
+
dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
}
@@ -708,7 +784,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.dispclk_khz / 1000 / 7);
if (dc->config.enable_auto_dpm_test_logs) {
- dcn32_auto_dpm_test_log(new_clocks, clk_mgr);
+ dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context);
}
}
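The refresh-rate lines added in dcn32_auto_dpm_test_log() above implement a rounded-up integer division, refresh_hz = ceil(pix_clk_hz / (h_total * v_total)), split across two div_u64() calls because a bare 64-bit '/' cannot be used in 32-bit kernel builds. A standalone userspace sketch of the same math, with assumed 4k@60 timing values:

/* Userspace model of the div_u64()-based refresh math above; a plain
 * '/' over the product replaces the two div_u64() steps, which is
 * equivalent for integer division and available outside the kernel. */
#include <stdint.h>
#include <stdio.h>

static uint64_t refresh_hz(uint32_t pix_clk_100hz, uint32_t h_total,
                           uint32_t v_total)
{
        uint64_t hz = (uint64_t)pix_clk_100hz * 100;    /* 100 Hz units -> Hz */
        uint64_t pixels_per_frame = (uint64_t)h_total * v_total;

        return (hz + pixels_per_frame - 1) / pixels_per_frame;  /* round up */
}

int main(void)
{
        /* assumed timing: 594 MHz pixel clock, 4400 x 2250 total -> 60 Hz */
        printf("%llu Hz\n",
               (unsigned long long)refresh_hz(5940000, 4400, 2250));
        return 0;
}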
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index d5fde7d23fbf..9c660d1facc7 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -50,6 +50,7 @@
#include "dc_dmub_srv.h"
#include "link.h"
#include "logger_types.h"
+
#undef DC_LOGGER
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
@@ -80,12 +81,12 @@
static int dcn35_get_active_display_cnt_wa(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ int *all_active_disps)
{
- int i, display_count;
+ int i, display_count = 0;
bool tmds_present = false;
- display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
@@ -103,7 +104,8 @@ static int dcn35_get_active_display_cnt_wa(
link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
-
+ if (all_active_disps != NULL)
+ *all_active_disps = display_count;
/* WA for hang on HDMI after display off back on*/
if (display_count == 0 && tmds_present)
display_count = 1;
@@ -126,21 +128,13 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
continue;
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
!pipe->stream->link_enc)) {
- struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
-
if (disable) {
- if (stream_enc && stream_enc->funcs->disable_fifo)
- pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
-
if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
} else {
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
-
- if (stream_enc && stream_enc->funcs->enable_fifo)
- pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
}
}
}
@@ -224,15 +218,16 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
+ int display_count = 0;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
+ int all_active_disps = 0;
if (dc->work_arounds.skip_clock_update)
return;
- /* DTBCLK is fixed, so set a default if unspecified. */
+ display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
new_clocks->ref_dtbclk_khz = 600000;
@@ -254,7 +249,6 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
}
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
- display_count = dcn35_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0)
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
@@ -349,7 +343,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
@@ -424,9 +418,8 @@ bool dcn35_are_clock_states_equal(struct dc_clocks *a,
}
static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
- struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
+ struct clk_mgr_dcn35 *clk_mgr)
{
-
}
static struct clk_bw_params dcn35_bw_params = {
@@ -826,7 +819,7 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
struct dc_state *context = dc->current_state;
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
- display_count = dcn35_get_active_display_cnt_wa(dc, context);
+ display_count = dcn35_get_active_display_cnt_wa(dc, context, NULL);
/* if we can go lower, go lower */
if (display_count == 0)
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
@@ -993,7 +986,6 @@ void dcn35_clk_mgr_construct(
struct dccg *dccg)
{
struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 };
- struct clk_log_info log_info = {0};
clk_mgr->base.base.ctx = ctx;
clk_mgr->base.base.funcs = &dcn35_funcs;
@@ -1046,7 +1038,7 @@ void dcn35_clk_mgr_construct(
dcn35_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
- dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
+ dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
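dcn35_get_active_display_cnt_wa() above now reports the raw display count through an optional out-parameter while the return value keeps the workaround adjustment, letting callers such as dcn35_set_low_power_state() pass NULL. A minimal sketch of that idiom, with hypothetical names:

/* Optional out-parameter sketch: report the unadjusted count only to
 * callers that ask for it; keep the workaround in the return value. */
#include <stddef.h>

static int active_count_with_wa(const int *stream_active, int n,
                                int tmds_present, int *all_active_disps)
{
        int count = 0;

        for (int i = 0; i < n; i++)
                if (stream_active[i])
                        count++;

        if (all_active_disps != NULL)
                *all_active_disps = count;      /* raw, pre-workaround count */

        /* WA for the HDMI off/on hang: report one active display while
         * a TMDS link is present even if nothing is being driven. */
        if (count == 0 && tmds_present)
                count = 1;

        return count;
}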
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
index af0a0f292595..6d4a1ffab5ed 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -279,7 +279,7 @@ void dcn35_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, u
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info);
- smu_print("VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %d\n", idle_info);
+ smu_print("%s: VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %x\n", __func__, idle_info);
}
void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
@@ -298,7 +298,7 @@ void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool e
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info.data);
- smu_print("dcn35_smu_enable_phy_refclk_pwrdwn = %d\n", enable ? 1 : 0);
+ smu_print("%s smu_enable_phy_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0);
}
void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
@@ -310,6 +310,7 @@ void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
clk_mgr,
VBIOSSMC_MSG_UpdatePmeRestore,
0);
+ smu_print("%s: SMC_MSG_UpdatePmeRestore\n", __func__);
}
void dcn35_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
@@ -350,7 +351,7 @@ void dcn35_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support)
{
- unsigned int msg_id, param;
+ unsigned int msg_id, param, retv;
if (!clk_mgr->smu_present)
return;
@@ -360,27 +361,32 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
case DCN_ZSTATE_SUPPORT_ALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 9) | (1 << 8);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = %d\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_DISALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = 0;
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = %d\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = %d\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 8);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = %d\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 8);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = %d\n", __func__, param);
break;
default: //DCN_ZSTATE_SUPPORT_UNKNOWN
@@ -390,11 +396,11 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
}
- dcn35_smu_send_msg_with_param(
+ retv = dcn35_smu_send_msg_with_param(
clk_mgr,
msg_id,
param);
- smu_print("dcn35_smu_set_zstate_support msg_id = %d, param = %d\n", msg_id, param);
+ smu_print("%s: msg_id = %d, param = 0x%x, return = %d\n", __func__, msg_id, param, retv);
}
int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)
@@ -408,7 +414,7 @@ int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)
VBIOSSMC_MSG_GetDprefclkFreq,
0);
- smu_print("dcn35_smu_get_DPREF clk = %d mhz\n", dprefclk);
+ smu_print("%s: SMU DPREF clk = %d mhz\n", __func__, dprefclk);
return dprefclk * 1000;
}
@@ -423,7 +429,7 @@ int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr)
VBIOSSMC_MSG_GetDtbclkFreq,
0);
- smu_print("dcn35_smu_get_dtbclk = %d mhz\n", dtbclk);
+ smu_print("%s: get_dtbclk = %dmhz\n", __func__, dtbclk);
return dtbclk * 1000;
}
/* Arg = 1: turn DTB CLK on; 0: turn DTB CLK off. When on, it runs at 600 MHz */
@@ -436,7 +442,7 @@ void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
clk_mgr,
VBIOSSMC_MSG_SetDtbClk,
enable);
- smu_print("dcn35_smu_set_dtbclk = %d \n", enable ? 1 : 0);
+ smu_print("%s: smu_set_dtbclk = %d\n", __func__, enable ? 1 : 0);
}
void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
@@ -445,30 +451,45 @@ void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *cl
clk_mgr,
VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
enable);
+ smu_print("%s: smu_enable_48mhz_tmdp_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0);
}
int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
{
- return dcn35_smu_send_msg_with_param(
+ int retv;
+
+ retv = dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_DispPsrExit,
0);
+ smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv);
+ return retv;
}
int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
{
- return dcn35_smu_send_msg_with_param(
+ int retv;
+
+ retv = dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_QueryIPS2Support,
0);
+
+ //smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv);
+ return retv;
}
void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)
{
REG_WRITE(MP1_SMN_C2PMSG_71, param);
+ //smu_print("%s: write_ips_scratch = %x\n", __func__, param);
}
uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)
{
- return REG_READ(MP1_SMN_C2PMSG_71);
+ uint32_t retv;
+
+ retv = REG_READ(MP1_SMN_C2PMSG_71);
+ //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv);
+ return retv;
}
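The AllowZstatesEntry parameters logged above are plain bitmasks: bit 8 is set in every case that allows Z8, bit 10 in every case that allows Z10, and the full-allow case sets bits 8 through 10. A restatement of those masks as a sketch; the exact meaning of bit 9 is not spelled out in this diff and is assumed here:

/* Z-state allow masks implied by the switch cases above; bit 9 is
 * only ever set in the full-allow case, so its label is an assumption. */
#define ZSTATE_PARAM_Z8         (1u << 8)
#define ZSTATE_PARAM_BIT9       (1u << 9)       /* assumed: remaining state */
#define ZSTATE_PARAM_Z10        (1u << 10)

static unsigned int zstate_allow_param(int allow_z8, int allow_z10,
                                       int allow_all)
{
        if (allow_all)
                return ZSTATE_PARAM_Z8 | ZSTATE_PARAM_BIT9 | ZSTATE_PARAM_Z10;

        return (allow_z8  ? ZSTATE_PARAM_Z8  : 0u) |
               (allow_z10 ? ZSTATE_PARAM_Z10 : 0u);
}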
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index eab713c0da0d..aa7c02ba948e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -34,6 +34,8 @@
#include "dce/dce_hwseq.h"
#include "resource.h"
+#include "dc_state.h"
+#include "dc_state_priv.h"
#include "gpio_service_interface.h"
#include "clk_mgr.h"
@@ -519,7 +521,7 @@ dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
cmd.secure_display.roi_info.y_end = rect->y + rect->height;
}
- dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}
static inline void
@@ -808,7 +810,7 @@ static void dc_destruct(struct dc *dc)
link_enc_cfg_init(dc, dc->current_state);
if (dc->current_state) {
- dc_release_state(dc->current_state);
+ dc_state_release(dc->current_state);
dc->current_state = NULL;
}
@@ -1020,29 +1022,27 @@ static bool dc_construct(struct dc *dc,
}
#endif
+ if (!create_links(dc, init_params->num_virtual_links))
+ goto fail;
+
+ /* Create additional DIG link encoder objects if fewer than the platform
+ * supports were created during link construction.
+ */
+ if (!create_link_encoders(dc))
+ goto fail;
+
/* Creation of current_state must occur after dc->dml
* is initialized in dc_create_resource_pool because
* on creation it copies the contents of dc->dml
*/
- dc->current_state = dc_create_state(dc);
+ dc->current_state = dc_state_create(dc);
if (!dc->current_state) {
dm_error("%s: failed to create validate ctx\n", __func__);
goto fail;
}
- if (!create_links(dc, init_params->num_virtual_links))
- goto fail;
-
- /* Create additional DIG link encoder objects if fewer than the platform
- * supports were created during link construction.
- */
- if (!create_link_encoders(dc))
- goto fail;
-
- dc_resource_state_construct(dc, dc->current_state);
-
return true;
fail:
@@ -1085,7 +1085,7 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
}
}
-static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
@@ -1105,9 +1105,9 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte
if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
- get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
- get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
}
}
}
@@ -1115,7 +1115,7 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
- struct dc_state *dangling_context = dc_create_state(dc);
+ struct dc_state *dangling_context = dc_state_create_current_copy(dc);
struct dc_state *current_ctx;
struct pipe_ctx *pipe;
struct timing_generator *tg;
@@ -1123,8 +1123,6 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
if (dangling_context == NULL)
return;
- dc_resource_state_copy_construct(dc->current_state, dangling_context);
-
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_stream_state *old_stream =
dc->current_state->res_ctx.pipe_ctx[i].stream;
@@ -1161,6 +1159,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
}
if (should_disable && old_stream) {
+ bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
tg = pipe->stream_res.tg;
/* When disabling plane for a phantom pipe, we must turn on the
@@ -1169,22 +1168,29 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
* state that can result in underflow or hang when enabling it
* again for different use.
*/
- if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (is_phantom) {
if (tg->funcs->enable_crtc) {
int main_pipe_width, main_pipe_height;
+ struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);
- main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
- main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
+ main_pipe_width = old_paired_stream->dst.width;
+ main_pipe_height = old_paired_stream->dst.height;
if (dc->hwss.blank_phantom)
dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
tg->funcs->enable_crtc(tg);
}
}
- dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+
+ if (is_phantom)
+ dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
+ else
+ dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
- if (pipe->stream && pipe->plane_state)
- dc_update_viusal_confirm_color(dc, context, pipe);
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
+ dc_update_visual_confirm_color(dc, context, pipe);
+ }
if (dc->hwss.apply_ctx_for_surface) {
apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
@@ -1203,7 +1209,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
* The OTG is set to disable on falling edge of VUPDATE so the plane disable
* will still get it's double buffer update.
*/
- if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (is_phantom) {
if (tg->funcs->disable_phantom_crtc)
tg->funcs->disable_phantom_crtc(tg);
}
@@ -1212,7 +1218,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
current_ctx = dc->current_state;
dc->current_state = dangling_context;
- dc_release_state(current_ctx);
+ dc_state_release(current_ctx);
}
static void disable_vbios_mode_if_required(
@@ -1284,7 +1290,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
continue;
/* Timeout 100 ms */
@@ -1510,7 +1516,7 @@ static void program_timing_sync(
}
for (k = 0; k < group_size; k++) {
- struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
+ struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
status->timing_sync_info.group_id = num_group;
status->timing_sync_info.group_size = group_size;
@@ -1521,7 +1527,7 @@ static void program_timing_sync(
}
- /* remove any other pipes that are already been synced */
+ /* remove any other unblanked pipes as they have already been synced */
if (dc->config.use_pipe_ctx_sync_logic) {
/* check pipe's syncd to decide which pipe to be removed */
for (j = 1; j < group_size; j++) {
@@ -1534,6 +1540,7 @@ static void program_timing_sync(
pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
}
} else {
+ /* remove any other pipes by checking valid plane */
for (j = j + 1; j < group_size; j++) {
bool is_blanked;
@@ -1554,7 +1561,7 @@ static void program_timing_sync(
if (group_size > 1) {
if (sync_type == TIMING_SYNCHRONIZABLE) {
dc->hwss.enable_timing_synchronization(
- dc, group_index, group_size, pipe_set);
+ dc, ctx, group_index, group_size, pipe_set);
} else
if (sync_type == VBLANK_SYNCHRONIZABLE) {
dc->hwss.enable_vblanks_synchronization(
@@ -1836,7 +1843,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
/* Check old context for SubVP */
- subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+ subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
if (subvp_prev_use)
break;
}
@@ -1994,9 +2001,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
old_state = dc->current_state;
dc->current_state = context;
- dc_release_state(old_state);
+ dc_state_release(old_state);
- dc_retain_state(dc->current_state);
+ dc_state_retain(dc->current_state);
return result;
}
@@ -2067,12 +2074,10 @@ enum dc_status dc_commit_streams(struct dc *dc,
if (handle_exit_odm2to1)
res = commit_minimal_transition_state(dc, dc->current_state);
- context = dc_create_state(dc);
+ context = dc_state_create_current_copy(dc);
if (!context)
goto context_alloc_fail;
- dc_resource_state_copy_construct_current(dc, context);
-
res = dc_validate_with_context(dc, set, stream_count, context, false);
if (res != DC_OK) {
BREAK_TO_DEBUGGER();
@@ -2087,7 +2092,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
if (dc_is_embedded_signal(streams[i]->signal)) {
- struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]);
+ struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]);
if (dc->hwss.is_abm_supported)
status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
@@ -2098,7 +2103,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
fail:
- dc_release_state(context);
+ dc_state_release(context);
context_alloc_fail:
@@ -2152,7 +2157,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
pipe = &context->res_ctx.pipe_ctx[i];
// Don't check flip pending on phantom pipes
- if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
+ if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
continue;
/* Must set to false to start with, due to OR in update function */
@@ -2210,7 +2215,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
if (context->res_ctx.pipe_ctx[i].stream == NULL ||
context->res_ctx.pipe_ctx[i].plane_state == NULL) {
context->res_ctx.pipe_ctx[i].pipe_idx = i;
- dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+ dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
}
process_deferred_updates(dc);
@@ -2225,110 +2230,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
dc->wm_optimized_required = false;
}
-static void init_state(struct dc *dc, struct dc_state *context)
-{
- /* Each context must have their own instance of VBA and in order to
- * initialize and obtain IP and SOC the base DML instance from DC is
- * initially copied into every context
- */
- memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
-}
-
-struct dc_state *dc_create_state(struct dc *dc)
-{
- struct dc_state *context = kvzalloc(sizeof(struct dc_state),
- GFP_KERNEL);
-
- if (!context)
- return NULL;
-
- init_state(dc, context);
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- if (dc->debug.using_dml2) {
- dml2_create(dc, &dc->dml2_options, &context->bw_ctx.dml2);
- }
-#endif
- kref_init(&context->refcount);
-
- return context;
-}
-
-struct dc_state *dc_copy_state(struct dc_state *src_ctx)
-{
- int i, j;
- struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
-#ifdef CONFIG_DRM_AMD_DC_FP
- struct dml2_context *dml2 = NULL;
-#endif
-
- if (!new_ctx)
- return NULL;
- memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- if (new_ctx->bw_ctx.dml2) {
- dml2 = kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
- if (!dml2)
- return NULL;
-
- memcpy(dml2, src_ctx->bw_ctx.dml2, sizeof(struct dml2_context));
- new_ctx->bw_ctx.dml2 = dml2;
- }
-#endif
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
-
- if (cur_pipe->top_pipe)
- cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
-
- if (cur_pipe->bottom_pipe)
- cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
-
- if (cur_pipe->prev_odm_pipe)
- cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
-
- if (cur_pipe->next_odm_pipe)
- cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
-
- }
-
- for (i = 0; i < new_ctx->stream_count; i++) {
- dc_stream_retain(new_ctx->streams[i]);
- for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
- dc_plane_state_retain(
- new_ctx->stream_status[i].plane_states[j]);
- }
-
- kref_init(&new_ctx->refcount);
-
- return new_ctx;
-}
-
-void dc_retain_state(struct dc_state *context)
-{
- kref_get(&context->refcount);
-}
-
-static void dc_state_free(struct kref *kref)
-{
- struct dc_state *context = container_of(kref, struct dc_state, refcount);
- dc_resource_state_destruct(context);
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- dml2_destroy(context->bw_ctx.dml2);
- context->bw_ctx.dml2 = 0;
-#endif
-
- kvfree(context);
-}
-
-void dc_release_state(struct dc_state *context)
-{
- kref_put(&context->refcount, dc_state_free);
-}
-
bool dc_set_generic_gpio_for_stereo(bool enable,
struct gpio_service *gpio_service)
{
@@ -3001,11 +2902,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
update->dsc_config->num_slices_v != 0);
/* Use temporary context for validating new DSC config */
- struct dc_state *dsc_validate_context = dc_create_state(dc);
+ struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
if (dsc_validate_context) {
- dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
-
stream->timing.dsc_cfg = *update->dsc_config;
stream->timing.flags.DSC = enable_dsc;
if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
@@ -3014,7 +2913,7 @@ static void copy_stream_update_to_stream(struct dc *dc,
update->dsc_config = NULL;
}
- dc_release_state(dsc_validate_context);
+ dc_state_release(dsc_validate_context);
} else {
DC_ERROR("Failed to allocate new validate context for DSC change\n");
update->dsc_config = NULL;
@@ -3113,30 +3012,27 @@ static bool update_planes_and_stream_state(struct dc *dc,
new_planes[i] = srf_updates[i].surface;
/* initialize scratch memory for building context */
- context = dc_create_state(dc);
+ context = dc_state_create_copy(dc->current_state);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return false;
}
- dc_resource_state_copy_construct(
- dc->current_state, context);
-
/* For each full update, remove all existing phantom pipes first.
* Ensures that we have enough pipes for newly added MPO planes
*/
- if (dc->res_pool->funcs->remove_phantom_pipes)
- dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
/* remove old surfaces from context */
- if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
+ if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
BREAK_TO_DEBUGGER();
goto fail;
}
/* add surface to context */
- if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+ if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
BREAK_TO_DEBUGGER();
goto fail;
@@ -3161,19 +3057,6 @@ static bool update_planes_and_stream_state(struct dc *dc,
if (update_type == UPDATE_TYPE_FULL) {
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
- /* For phantom pipes we remove and create a new set of phantom pipes
- * for each full update (because we don't know if we'll need phantom
- * pipes until after the first round of validation). However, if validation
- * fails we need to keep the existing phantom pipes (because we don't update
- * the dc->current_state).
- *
- * The phantom stream/plane refcount is decremented for validation because
- * we assume it'll be removed (the free comes when the dc_state is freed),
- * but if validation fails we have to increment back the refcount so it's
- * consistent.
- */
- if (dc->res_pool->funcs->retain_phantom_pipes)
- dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state);
BREAK_TO_DEBUGGER();
goto fail;
}
@@ -3194,7 +3077,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
return true;
fail:
- dc_release_state(context);
+ dc_state_release(context);
return false;
@@ -3390,7 +3273,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
update_dirty_rect->panel_inst = panel_inst;
update_dirty_rect->pipe_idx = j;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}
}
}
@@ -3492,18 +3375,24 @@ static void commit_planes_for_stream_fast(struct dc *dc,
{
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
+ struct dc_stream_status *stream_status = NULL;
dc_z10_restore(dc);
top_pipe_to_program = resource_get_otg_master_for_stream(
&context->res_ctx,
stream);
- if (dc->debug.visual_confirm) {
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ if (!top_pipe_to_program)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
- if (pipe->stream && pipe->plane_state)
- dc_update_viusal_confirm_color(dc, context, pipe);
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
}
}
@@ -3527,6 +3416,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
}
}
+ stream_status = dc_state_get_stream_status(context, stream);
+
build_dmub_cmd_list(dc,
srf_updates,
surface_count,
@@ -3539,7 +3430,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
context->dmub_cmd_count,
context->block_sequence,
&(context->block_sequence_steps),
- top_pipe_to_program);
+ top_pipe_to_program,
+ stream_status);
hwss_execute_sequence(dc,
context->block_sequence,
context->block_sequence_steps);
@@ -3630,12 +3522,12 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program = resource_get_otg_master_for_stream(
&context->res_ctx,
stream);
-
+ ASSERT(top_pipe_to_program != NULL);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
// Check old context for SubVP
- subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+ subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
if (subvp_prev_use)
break;
}
@@ -3643,19 +3535,22 @@ static void commit_planes_for_stream(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
subvp_curr_use = true;
break;
}
}
- if (dc->debug.visual_confirm)
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
- if (pipe->stream && pipe->plane_state)
- dc_update_viusal_confirm_color(dc, context, pipe);
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
}
+ }
if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
struct pipe_ctx *mpcc_pipe;
@@ -4028,7 +3923,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+ if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
subvp_active = true;
break;
}
@@ -4065,7 +3960,7 @@ struct pipe_split_policy_backup {
static void release_minimal_transition_state(struct dc *dc,
struct dc_state *context, struct pipe_split_policy_backup *policy)
{
- dc_release_state(context);
+ dc_state_release(context);
/* restore previous pipe split and odm policy */
if (!dc->config.is_vmin_only_asic)
dc->debug.pipe_split_policy = policy->mpc_policy;
@@ -4076,7 +3971,7 @@ static void release_minimal_transition_state(struct dc *dc,
static struct dc_state *create_minimal_transition_state(struct dc *dc,
struct dc_state *base_context, struct pipe_split_policy_backup *policy)
{
- struct dc_state *minimal_transition_context = dc_create_state(dc);
+ struct dc_state *minimal_transition_context = NULL;
unsigned int i, j;
if (!dc->config.is_vmin_only_asic) {
@@ -4088,7 +3983,9 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
policy->subvp_policy = dc->debug.force_disable_subvp;
dc->debug.force_disable_subvp = true;
- dc_resource_state_copy_construct(base_context, minimal_transition_context);
+ minimal_transition_context = dc_state_create_copy(base_context);
+ if (!minimal_transition_context)
+ return NULL;
/* commit minimal state */
if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
@@ -4120,7 +4017,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
bool success = false;
struct dc_state *minimal_transition_context;
struct pipe_split_policy_backup policy;
- struct mall_temp_config mall_temp_config;
/* commit based on new context */
/* Since all phantom pipes are removed in full validation,
@@ -4129,8 +4025,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
* pipe as subvp/phantom will be cleared (dc copy constructor
* creates a shallow copy).
*/
- if (dc->res_pool->funcs->save_mall_state)
- dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
minimal_transition_context = create_minimal_transition_state(dc,
context, &policy);
if (minimal_transition_context) {
@@ -4143,16 +4037,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
}
release_minimal_transition_state(dc, minimal_transition_context, &policy);
- if (dc->res_pool->funcs->restore_mall_state)
- dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
- /* If we do a minimal transition with plane removal and the context
- * has subvp we also have to retain back the phantom stream / planes
- * since the refcount is decremented as part of the min transition
- * (we commit a state with no subvp, so the phantom streams / planes
- * had to be removed).
- */
- if (dc->res_pool->funcs->retain_phantom_pipes)
- dc->res_pool->funcs->retain_phantom_pipes(dc, context);
}
if (!success) {
@@ -4220,7 +4104,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
subvp_in_use = true;
break;
}
@@ -4461,6 +4345,8 @@ static bool should_commit_minimal_transition_for_windowed_mpo_odm(struct dc *dc,
cur_pipe = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, stream);
new_pipe = resource_get_otg_master_for_stream(&context->res_ctx, stream);
+ if (!cur_pipe || !new_pipe)
+ return false;
cur_is_odm_in_use = resource_get_odm_slice_count(cur_pipe) > 1;
new_is_odm_in_use = resource_get_odm_slice_count(new_pipe) > 1;
if (cur_is_odm_in_use == new_is_odm_in_use)
@@ -4486,7 +4372,6 @@ bool dc_update_planes_and_stream(struct dc *dc,
struct dc_state *context;
enum surface_update_type update_type;
int i;
- struct mall_temp_config mall_temp_config;
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
/* In cases where MPO and split or ODM are used transitions can
@@ -4530,23 +4415,10 @@ bool dc_update_planes_and_stream(struct dc *dc,
* pipe as subvp/phantom will be cleared (dc copy constructor
* creates a shallow copy).
*/
- if (dc->res_pool->funcs->save_mall_state)
- dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
if (!commit_minimal_transition_state(dc, context)) {
- dc_release_state(context);
+ dc_state_release(context);
return false;
}
- if (dc->res_pool->funcs->restore_mall_state)
- dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
-
- /* If we do a minimal transition with plane removal and the context
- * has subvp we also have to retain back the phantom stream / planes
- * since the refcount is decremented as part of the min transition
- * (we commit a state with no subvp, so the phantom streams / planes
- * had to be removed).
- */
- if (dc->res_pool->funcs->retain_phantom_pipes)
- dc->res_pool->funcs->retain_phantom_pipes(dc, context);
update_type = UPDATE_TYPE_FULL;
}
@@ -4603,7 +4475,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
struct dc_state *old = dc->current_state;
dc->current_state = context;
- dc_release_state(old);
+ dc_state_release(old);
// clear any forced full updates
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -4662,14 +4534,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
/* initialize scratch memory for building context */
- context = dc_create_state(dc);
+ context = dc_state_create_copy(state);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return;
}
- dc_resource_state_copy_construct(state, context);
-
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -4708,7 +4578,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
DC_ERROR("Mode validation failed for stream update!\n");
- dc_release_state(context);
+ dc_state_release(context);
return;
}
}
@@ -4741,7 +4611,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_state *old = dc->current_state;
dc->current_state = context;
- dc_release_state(old);
+ dc_state_release(old);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -4814,7 +4684,7 @@ void dc_set_power_state(
switch (power_state) {
case DC_ACPI_CM_POWER_STATE_D0:
- dc_resource_state_construct(dc, dc->current_state);
+ dc_state_construct(dc, dc->current_state);
dc_z10_restore(dc);
@@ -4829,7 +4699,7 @@ void dc_set_power_state(
default:
ASSERT(dc->current_state->stream_count == 0);
- dc_resource_state_destruct(dc->current_state);
+ dc_state_destruct(dc->current_state);
break;
}
@@ -4906,6 +4776,38 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
return true;
}
+/* enable/disable eDP Replay without specifying a stream for eDP */
+bool dc_set_replay_allow_active(struct dc *dc, bool active)
+{
+ int i;
+ bool allow_active;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ struct dc_link *link;
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ link = stream->link;
+ if (!link)
+ continue;
+
+ if (link->replay_settings.replay_feature_enabled) {
+ if (active && !link->replay_settings.replay_allow_active) {
+ allow_active = true;
+ if (!dc_link_set_replay_allow_active(link, &allow_active,
+ false, false, NULL))
+ return false;
+ } else if (!active && link->replay_settings.replay_allow_active) {
+ allow_active = false;
+ if (!dc_link_set_replay_allow_active(link, &allow_active,
+ true, false, NULL))
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
void dc_allow_idle_optimizations(struct dc *dc, bool allow)
{
if (dc->debug.disable_idle_power_optimizations)
@@ -5099,18 +5001,28 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
*/
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
- /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
- if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
- dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
- !dc->debug.dpia_debug.bits.disable_dpia)
- return true;
+ switch (dc->ctx->asic_id.chip_family) {
- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
- !dc->debug.dpia_debug.bits.disable_dpia)
- return true;
+ case FAMILY_YELLOW_CARP:
+ /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
+ if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
+ !dc->debug.dpia_debug.bits.disable_dpia)
+ return true;
+ break;
+
+ case AMDGPU_FAMILY_GC_11_0_1:
+ case AMDGPU_FAMILY_GC_11_5_0:
+ if (!dc->debug.dpia_debug.bits.disable_dpia)
+ return true;
+ break;
+
+ default:
+ break;
+ }
/* dmub aux needs dmub notifications to be enabled */
return dc->debug.enable_dmub_aux_for_legacy_ddc;
}
/**
@@ -5207,7 +5119,7 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
);
}
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -5261,7 +5173,7 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
- if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
+ if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
/* command is not processed by dmub */
notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
return is_cmd_complete;
@@ -5304,7 +5216,7 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
- if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
/* command is not processed by dmub */
return DC_ERROR_UNEXPECTED;
@@ -5342,7 +5254,7 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
}
@@ -5441,6 +5353,8 @@ bool dc_abm_save_restore(
struct dc_link *link = stream->sink->link;
struct dc_link *edp_links[MAX_NUM_EDP];
+ if (link->replay_settings.replay_feature_enabled)
+ return false;
/* find primary pipe associated with stream */
for (i = 0; i < MAX_PIPES; i++) {
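The state life-cycle helpers removed from dc.c above continue as dc_state_create()/dc_state_retain()/dc_state_release() in dc_state.c, keeping the same kref discipline. A condensed sketch of that refcounting pattern, with the payload trimmed and kernel context assumed:

/* kref life cycle mirroring the removed dc_retain_state()/
 * dc_release_state() pair: the refcount starts at 1 on create and the
 * free callback runs when the last reference is put. */
#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/mm.h>

struct ctx_sketch {
        struct kref refcount;
        /* ... payload ... */
};

static struct ctx_sketch *ctx_create(void)
{
        struct ctx_sketch *c = kvzalloc(sizeof(*c), GFP_KERNEL);

        if (c)
                kref_init(&c->refcount);        /* refcount = 1 */
        return c;
}

static void ctx_retain(struct ctx_sketch *c)
{
        kref_get(&c->refcount);
}

static void ctx_free(struct kref *kref)
{
        struct ctx_sketch *c = container_of(kref, struct ctx_sketch, refcount);

        kvfree(c);
}

static void ctx_release(struct ctx_sketch *c)
{
        kref_put(&c->refcount, ctx_free);       /* frees on last put */
}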
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index fe07160932d6..9c05b1a07142 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -31,6 +31,7 @@
#include "basics/dc_common.h"
#include "resource.h"
#include "dc_dmub_srv.h"
+#include "dc_state_priv.h"
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
@@ -425,45 +426,130 @@ void get_hdr_visual_confirm_color(
}
void get_subvp_visual_confirm_color(
- struct dc *dc,
- struct dc_state *context,
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
{
uint32_t color_value = MAX_TG_COLOR_VALUE;
- bool enable_subvp = false;
- int i;
-
- if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context)
- return;
+ if (pipe_ctx) {
+ switch (pipe_ctx->p_state_type) {
+ case P_STATE_SUB_VP:
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ break;
+ case P_STATE_DRR_SUB_VP:
+ color->color_r_cr = 0;
+ color->color_g_y = color_value;
+ color->color_b_cb = 0;
+ break;
+ case P_STATE_V_BLANK_SUB_VP:
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ break;
+ default:
+ break;
+ }
+ }
+}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+void get_mclk_switch_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
- if (pipe->stream && pipe->stream->mall_stream_config.paired_stream &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
- /* SubVP enable - red */
- color->color_g_y = 0;
+ if (pipe_ctx) {
+ switch (pipe_ctx->p_state_type) {
+ case P_STATE_V_BLANK:
+ color->color_r_cr = color_value;
+ color->color_g_y = color_value;
color->color_b_cb = 0;
+ break;
+ case P_STATE_FPO:
+ color->color_r_cr = 0;
+ color->color_g_y = color_value;
+ color->color_b_cb = color_value;
+ break;
+ case P_STATE_V_ACTIVE:
color->color_r_cr = color_value;
- enable_subvp = true;
-
- if (pipe_ctx->stream == pipe->stream)
- return;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ break;
+ case P_STATE_SUB_VP:
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ break;
+ case P_STATE_DRR_SUB_VP:
+ color->color_r_cr = 0;
+ color->color_g_y = color_value;
+ color->color_b_cb = 0;
+ break;
+ case P_STATE_V_BLANK_SUB_VP:
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ break;
+ default:
break;
}
}
+}
- if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) {
- color->color_r_cr = 0;
- if (pipe_ctx->stream->allow_freesync == 1) {
- /* SubVP enable and DRR on - green */
- color->color_b_cb = 0;
- color->color_g_y = color_value;
+void set_p_state_switch_method(
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+ bool enable_subvp;
+
+ if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context)
+ return;
+
+ if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
+ dm_dram_clock_change_unsupported) {
+ /* MCLK switching is supported */
+ if (!pipe_ctx->has_vactive_margin) {
+ /* In Vblank - yellow */
+ pipe_ctx->p_state_type = P_STATE_V_BLANK;
+
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ /* FPO + Vblank - cyan */
+ pipe_ctx->p_state_type = P_STATE_FPO;
+ }
} else {
- /* SubVP enable and No DRR - blue */
- color->color_g_y = 0;
- color->color_b_cb = color_value;
+ /* In Vactive - pink */
+ pipe_ctx->p_state_type = P_STATE_V_ACTIVE;
+ }
+
+ /* SubVP */
+ enable_subvp = false;
+
+ for (int i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && dc_state_get_paired_subvp_stream(context, pipe->stream) &&
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+ /* SubVP enable - red */
+ pipe_ctx->p_state_type = P_STATE_SUB_VP;
+ enable_subvp = true;
+
+ if (pipe_ctx->stream == pipe->stream)
+ return;
+ break;
+ }
+ }
+
+ if (enable_subvp && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_NONE) {
+ if (pipe_ctx->stream->allow_freesync == 1) {
+ /* SubVP enable and DRR on - green */
+ pipe_ctx->p_state_type = P_STATE_DRR_SUB_VP;
+ } else {
+ /* SubVP enable and No DRR - blue */
+ pipe_ctx->p_state_type = P_STATE_V_BLANK_SUB_VP;
+ }
}
}
}
@@ -473,7 +559,8 @@ void hwss_build_fast_sequence(struct dc *dc,
unsigned int dmub_cmd_count,
struct block_sequence block_sequence[],
int *num_steps,
- struct pipe_ctx *pipe_ctx)
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_status *stream_status)
{
struct dc_plane_state *plane = pipe_ctx->plane_state;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -490,7 +577,8 @@ void hwss_build_fast_sequence(struct dc *dc,
if (dc->hwss.subvp_pipe_control_lock_fast) {
block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true;
- block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx;
+ block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip =
+ plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN;
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
@@ -529,7 +617,7 @@ void hwss_build_fast_sequence(struct dc *dc,
}
if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) {
if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) &&
- current_mpc_pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ stream_status->mall_stream_config.type == SUBVP_MAIN) {
block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
block_sequence[*num_steps].params.subvp_save_surf_addr.addr = &current_mpc_pipe->plane_state->address;
block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index;
@@ -612,7 +700,8 @@ void hwss_build_fast_sequence(struct dc *dc,
if (dc->hwss.subvp_pipe_control_lock_fast) {
block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = false;
- block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx;
+ block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip =
+ plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN;
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
@@ -724,7 +813,7 @@ void hwss_send_dmcub_cmd(union block_sequence_params *params)
union dmub_rb_cmd *cmd = params->send_dmcub_cmd_params.cmd;
enum dm_dmub_wait_type wait_type = params->send_dmcub_cmd_params.wait_type;
- dm_execute_dmub_cmd(ctx, cmd, wait_type);
+ dc_wake_and_execute_dmub_cmd(ctx, cmd, wait_type);
}
void hwss_program_manual_trigger(union block_sequence_params *params)
@@ -812,42 +901,6 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params)
dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index);
}
-void get_mclk_switch_visual_confirm_color(
- struct dc *dc,
- struct dc_state *context,
- struct pipe_ctx *pipe_ctx,
- struct tg_color *color)
-{
- uint32_t color_value = MAX_TG_COLOR_VALUE;
- struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
-
- if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context)
- return;
-
- if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
- dm_dram_clock_change_unsupported) {
- /* MCLK switching is supported */
- if (!pipe_ctx->has_vactive_margin) {
- /* In Vblank - yellow */
- color->color_r_cr = color_value;
- color->color_g_y = color_value;
-
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- /* FPO + Vblank - cyan */
- color->color_r_cr = 0;
- color->color_g_y = color_value;
- color->color_b_cb = color_value;
- }
- } else {
- /* In Vactive - pink */
- color->color_r_cr = color_value;
- color->color_b_cb = color_value;
- }
- /* SubVP */
- get_subvp_visual_confirm_color(dc, context, pipe_ctx, color);
- }
-}
-
void get_surface_tile_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
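With this refactor, set_p_state_switch_method() classifies each pipe once and the two visual-confirm helpers reduce to lookups on pipe_ctx->p_state_type. The full mapping, collected from the switch cases above into one sketch (1 stands for MAX_TG_COLOR_VALUE on that tg_color channel):

/* p_state_type -> visual confirm color, restated in one table */
struct confirm_rgb { unsigned char r, g, b; };

static const struct confirm_rgb p_state_confirm_color[] = {
        /* P_STATE_V_BLANK        */ { 1, 1, 0 },       /* yellow */
        /* P_STATE_FPO            */ { 0, 1, 1 },       /* cyan   */
        /* P_STATE_V_ACTIVE       */ { 1, 0, 1 },       /* pink   */
        /* P_STATE_SUB_VP         */ { 1, 0, 0 },       /* red    */
        /* P_STATE_DRR_SUB_VP     */ { 0, 1, 0 },       /* green  */
        /* P_STATE_V_BLANK_SUB_VP */ { 0, 0, 1 },       /* blue   */
};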
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index ed94187c2afa..c6c35037bdb8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -467,6 +467,13 @@ bool dc_link_setup_psr(struct dc_link *link,
return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context);
}
+bool dc_link_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts)
+{
+ return link->dc->link_srv->edp_set_replay_allow_active(link, allow_active, wait,
+ force_static, power_opts);
+}
+
bool dc_link_get_replay_state(const struct dc_link *link, uint64_t *state)
{
return link->dc->link_srv->edp_get_replay_state(link, state);
@@ -497,7 +504,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
link->dc->link_srv->enable_hpd_filter(link, enable);
}
-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
{
return dc->link_srv->validate_dpia_bandwidth(streams, count);
}
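Like the other entry points in dc_link_exports.c, the new dc_link_set_replay_allow_active() above is a thin forwarder into the per-dc link service vtable. A self-contained sketch of that pattern with condensed, hypothetical type names:

/* Export-forwarding sketch: the public entry point only dereferences
 * the service vtable hung off the owning dc. */
#include <stdbool.h>

struct link_service_sketch {
        bool (*edp_set_replay_allow_active)(void *link, const bool *allow,
                                            bool wait, bool force_static,
                                            const unsigned int *power_opts);
};

struct dc_sketch {
        struct link_service_sketch *link_srv;
};

struct link_sketch {
        struct dc_sketch *dc;
};

static bool set_replay_allow_active(struct link_sketch *link,
                                    const bool *allow, bool wait,
                                    bool force_static,
                                    const unsigned int *power_opts)
{
        return link->dc->link_srv->edp_set_replay_allow_active(link, allow,
                        wait, force_static, power_opts);
}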
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index e1c02527d04a..9fbdb09697fd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -42,6 +42,7 @@
#include "link_enc_cfg.h"
#include "link.h"
#include "clk_mgr.h"
+#include "dc_state_priv.h"
#include "virtual/virtual_link_hwss.h"
#include "link/hwss/link_hwss_dio.h"
#include "link/hwss/link_hwss_dpia.h"
@@ -2193,6 +2194,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
otg_master = resource_get_otg_master_for_stream(
&state->res_ctx, state->streams[stream_idx]);
+ if (!otg_master || otg_master->stream_res.tg == NULL) {
+ DC_LOG_DC("topology update: otg_master NULL stream_idx %d!\n", stream_idx);
+ return;
+ }
slice_count = resource_get_opp_heads_for_otg_master(otg_master,
&state->res_ctx, opp_heads);
for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
@@ -2459,6 +2464,9 @@ void resource_remove_otg_master_for_stream_output(struct dc_state *context,
struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(
&context->res_ctx, stream);
+ if (!otg_master)
+ return;
+
ASSERT(resource_get_odm_slice_count(otg_master) == 1);
ASSERT(otg_master->plane_state == NULL);
ASSERT(otg_master->stream_res.stream_enc);
@@ -2993,189 +3001,6 @@ bool resource_update_pipes_for_plane_with_slice_count(
return result;
}
-bool dc_add_plane_to_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state *plane_state,
- struct dc_state *context)
-{
- struct resource_pool *pool = dc->res_pool;
- struct pipe_ctx *otg_master_pipe;
- struct dc_stream_status *stream_status = NULL;
- bool added = false;
-
- stream_status = dc_stream_get_status_from_state(context, stream);
- if (stream_status == NULL) {
- dm_error("Existing stream not found; failed to attach surface!\n");
- goto out;
- } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
- dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
- plane_state, MAX_SURFACE_NUM);
- goto out;
- }
-
- otg_master_pipe = resource_get_otg_master_for_stream(
- &context->res_ctx, stream);
- added = resource_append_dpp_pipes_for_plane_composition(context,
- dc->current_state, pool, otg_master_pipe, plane_state);
-
- if (added) {
- stream_status->plane_states[stream_status->plane_count] =
- plane_state;
- stream_status->plane_count++;
- dc_plane_state_retain(plane_state);
- }
-
-out:
- return added;
-}
-
-bool dc_remove_plane_from_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state *plane_state,
- struct dc_state *context)
-{
- int i;
- struct dc_stream_status *stream_status = NULL;
- struct resource_pool *pool = dc->res_pool;
-
- if (!plane_state)
- return true;
-
- for (i = 0; i < context->stream_count; i++)
- if (context->streams[i] == stream) {
- stream_status = &context->stream_status[i];
- break;
- }
-
- if (stream_status == NULL) {
- dm_error("Existing stream not found; failed to remove plane.\n");
- return false;
- }
-
- resource_remove_dpp_pipes_for_plane_composition(
- context, pool, plane_state);
-
- for (i = 0; i < stream_status->plane_count; i++) {
- if (stream_status->plane_states[i] == plane_state) {
- dc_plane_state_release(stream_status->plane_states[i]);
- break;
- }
- }
-
- if (i == stream_status->plane_count) {
- dm_error("Existing plane_state not found; failed to detach it!\n");
- return false;
- }
-
- stream_status->plane_count--;
-
- /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */
- for (; i < stream_status->plane_count; i++)
- stream_status->plane_states[i] = stream_status->plane_states[i + 1];
-
- stream_status->plane_states[stream_status->plane_count] = NULL;
-
- if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
- /* ODM combine could prevent us from supporting more planes
- * we will reset ODM slice count back to 1 when all planes have
- * been removed to maximize the amount of planes supported when
- * new planes are added.
- */
- resource_update_pipes_for_stream_with_slice_count(
- context, dc->current_state, dc->res_pool, stream, 1);
-
- return true;
-}
-
-/**
- * dc_rem_all_planes_for_stream - Remove planes attached to the target stream.
- *
- * @dc: Current dc state.
- * @stream: Target stream, which we want to remove the attached plans.
- * @context: New context.
- *
- * Return:
- * Return true if DC was able to remove all planes from the target
- * stream, otherwise, return false.
- */
-bool dc_rem_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_state *context)
-{
- int i, old_plane_count;
- struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
-
- for (i = 0; i < context->stream_count; i++)
- if (context->streams[i] == stream) {
- stream_status = &context->stream_status[i];
- break;
- }
-
- if (stream_status == NULL) {
- dm_error("Existing stream %p not found!\n", stream);
- return false;
- }
-
- old_plane_count = stream_status->plane_count;
-
- for (i = 0; i < old_plane_count; i++)
- del_planes[i] = stream_status->plane_states[i];
-
- for (i = 0; i < old_plane_count; i++)
- if (!dc_remove_plane_from_context(dc, stream, del_planes[i], context))
- return false;
-
- return true;
-}
-
-static bool add_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- const struct dc_validation_set set[],
- int set_count,
- struct dc_state *context)
-{
- int i, j;
-
- for (i = 0; i < set_count; i++)
- if (set[i].stream == stream)
- break;
-
- if (i == set_count) {
- dm_error("Stream %p not found in set!\n", stream);
- return false;
- }
-
- for (j = 0; j < set[i].plane_count; j++)
- if (!dc_add_plane_to_context(dc, stream, set[i].plane_states[j], context))
- return false;
-
- return true;
-}
-
-bool dc_add_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state * const *plane_states,
- int plane_count,
- struct dc_state *context)
-{
- struct dc_validation_set set;
- int i;
-
- set.stream = stream;
- set.plane_count = plane_count;
-
- for (i = 0; i < plane_count; i++)
- set.plane_states[i] = plane_states[i];
-
- return add_all_planes_for_stream(dc, stream, &set, 1, context);
-}
-
bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream)
{
@@ -3327,84 +3152,6 @@ static struct audio *find_first_free_audio(
return NULL;
}
-/*
- * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
- */
-enum dc_status dc_add_stream_to_ctx(
- struct dc *dc,
- struct dc_state *new_ctx,
- struct dc_stream_state *stream)
-{
- enum dc_status res;
- DC_LOGGER_INIT(dc->ctx->logger);
-
- if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
- DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
- return DC_ERROR_UNEXPECTED;
- }
-
- new_ctx->streams[new_ctx->stream_count] = stream;
- dc_stream_retain(stream);
- new_ctx->stream_count++;
-
- res = resource_add_otg_master_for_stream_output(
- new_ctx, dc->res_pool, stream);
- if (res != DC_OK)
- DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);
-
- return res;
-}
-
-/*
- * dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
- */
-enum dc_status dc_remove_stream_from_ctx(
- struct dc *dc,
- struct dc_state *new_ctx,
- struct dc_stream_state *stream)
-{
- int i;
- struct dc_context *dc_ctx = dc->ctx;
- struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(
- &new_ctx->res_ctx, stream);
-
- if (!del_pipe) {
- DC_ERROR("Pipe not found for stream %p !\n", stream);
- return DC_ERROR_UNEXPECTED;
- }
-
- resource_update_pipes_for_stream_with_slice_count(new_ctx,
- dc->current_state, dc->res_pool, stream, 1);
- resource_remove_otg_master_for_stream_output(
- new_ctx, dc->res_pool, stream);
-
- for (i = 0; i < new_ctx->stream_count; i++)
- if (new_ctx->streams[i] == stream)
- break;
-
- if (new_ctx->streams[i] != stream) {
- DC_ERROR("Context doesn't have stream %p !\n", stream);
- return DC_ERROR_UNEXPECTED;
- }
-
- dc_stream_release(new_ctx->streams[i]);
- new_ctx->stream_count--;
-
- /* Trim back arrays */
- for (; i < new_ctx->stream_count; i++) {
- new_ctx->streams[i] = new_ctx->streams[i + 1];
- new_ctx->stream_status[i] = new_ctx->stream_status[i + 1];
- }
-
- new_ctx->streams[new_ctx->stream_count] = NULL;
- memset(
- &new_ctx->stream_status[new_ctx->stream_count],
- 0,
- sizeof(new_ctx->stream_status[0]));
-
- return DC_OK;
-}
-
static struct dc_stream_state *find_pll_sharable_stream(
struct dc_stream_state *stream_needs_pll,
struct dc_state *context)
@@ -3784,34 +3531,6 @@ enum dc_status resource_map_pool_resources(
return DC_ERROR_UNEXPECTED;
}
-/**
- * dc_resource_state_copy_construct_current() - Creates a new dc_state from existing state
- *
- * @dc: copy out of dc->current_state
- * @dst_ctx: copy into this
- *
- * This function makes a shallow copy of the current DC state and increments
- * refcounts on existing streams and planes.
- */
-void dc_resource_state_copy_construct_current(
- const struct dc *dc,
- struct dc_state *dst_ctx)
-{
- dc_resource_state_copy_construct(dc->current_state, dst_ctx);
-}
-
-
-void dc_resource_state_construct(
- const struct dc *dc,
- struct dc_state *dst_ctx)
-{
- dst_ctx->clk_mgr = dc->clk_mgr;
-
- /* Initialise DIG link encoder resource tracking variables. */
- link_enc_cfg_init(dc, dst_ctx);
-}
-
-
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)
{
if (dc->res_pool == NULL)
@@ -3855,6 +3574,31 @@ static bool planes_changed_for_existing_stream(struct dc_state *context,
return false;
}
+static bool add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ const struct dc_validation_set set[],
+ int set_count,
+ struct dc_state *state)
+{
+ int i, j;
+
+ for (i = 0; i < set_count; i++)
+ if (set[i].stream == stream)
+ break;
+
+ if (i == set_count) {
+ dm_error("Stream %p not found in set!\n", stream);
+ return false;
+ }
+
+ for (j = 0; j < set[i].plane_count; j++)
+ if (!dc_state_add_plane(dc, stream, set[i].plane_states[j], state))
+ return false;
+
+ return true;
+}
+
/**
* dc_validate_with_context - Validate and update the potential new stream in the context object
*
@@ -3960,7 +3704,8 @@ enum dc_status dc_validate_with_context(struct dc *dc,
unchanged_streams[i],
set,
set_count)) {
- if (!dc_rem_all_planes_for_stream(dc,
+
+ if (!dc_state_rem_all_planes_for_stream(dc,
unchanged_streams[i],
context)) {
res = DC_FAIL_DETACH_SURFACES;
@@ -3982,12 +3727,24 @@ enum dc_status dc_validate_with_context(struct dc *dc,
}
}
- if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
- res = DC_FAIL_DETACH_SURFACES;
- goto fail;
+ if (dc_state_get_stream_subvp_type(context, del_streams[i]) == SUBVP_PHANTOM) {
+ /* remove phantoms specifically */
+ if (!dc_state_rem_all_phantom_planes_for_stream(dc, del_streams[i], context, true)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+
+ res = dc_state_remove_phantom_stream(dc, context, del_streams[i]);
+ dc_state_release_phantom_stream(dc, context, del_streams[i]);
+ } else {
+ if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+
+ res = dc_state_remove_stream(dc, context, del_streams[i]);
}
- res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
if (res != DC_OK)
goto fail;
}
@@ -4010,7 +3767,7 @@ enum dc_status dc_validate_with_context(struct dc *dc,
/* Add new streams and then add all planes for the new stream */
for (i = 0; i < add_streams_count; i++) {
calculate_phy_pix_clks(add_streams[i]);
- res = dc_add_stream_to_ctx(dc, context, add_streams[i]);
+ res = dc_state_add_stream(dc, context, add_streams[i]);
if (res != DC_OK)
goto fail;
@@ -4516,84 +4273,6 @@ static void set_vtem_info_packet(
*info_packet = stream->vtem_infopacket;
}
-void dc_resource_state_destruct(struct dc_state *context)
-{
- int i, j;
-
- for (i = 0; i < context->stream_count; i++) {
- for (j = 0; j < context->stream_status[i].plane_count; j++)
- dc_plane_state_release(
- context->stream_status[i].plane_states[j]);
-
- context->stream_status[i].plane_count = 0;
- dc_stream_release(context->streams[i]);
- context->streams[i] = NULL;
- }
- context->stream_count = 0;
- context->stream_mask = 0;
- memset(&context->res_ctx, 0, sizeof(context->res_ctx));
- memset(&context->pp_display_cfg, 0, sizeof(context->pp_display_cfg));
- memset(&context->dcn_bw_vars, 0, sizeof(context->dcn_bw_vars));
- context->clk_mgr = NULL;
- memset(&context->bw_ctx.bw, 0, sizeof(context->bw_ctx.bw));
- memset(context->block_sequence, 0, sizeof(context->block_sequence));
- context->block_sequence_steps = 0;
- memset(context->dc_dmub_cmd, 0, sizeof(context->dc_dmub_cmd));
- context->dmub_cmd_count = 0;
- memset(&context->perf_params, 0, sizeof(context->perf_params));
- memset(&context->scratch, 0, sizeof(context->scratch));
-}
-
-void dc_resource_state_copy_construct(
- const struct dc_state *src_ctx,
- struct dc_state *dst_ctx)
-{
- int i, j;
- struct kref refcount = dst_ctx->refcount;
-#ifdef CONFIG_DRM_AMD_DC_FP
- struct dml2_context *dml2 = NULL;
-
- // Need to preserve allocated dml2 context
- if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
- dml2 = dst_ctx->bw_ctx.dml2;
-#endif
-
- *dst_ctx = *src_ctx;
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- // Preserve allocated dml2 context
- if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
- dst_ctx->bw_ctx.dml2 = dml2;
-#endif
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i];
-
- if (cur_pipe->top_pipe)
- cur_pipe->top_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
-
- if (cur_pipe->bottom_pipe)
- cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
-
- if (cur_pipe->next_odm_pipe)
- cur_pipe->next_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
-
- if (cur_pipe->prev_odm_pipe)
- cur_pipe->prev_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
- }
-
- for (i = 0; i < dst_ctx->stream_count; i++) {
- dc_stream_retain(dst_ctx->streams[i]);
- for (j = 0; j < dst_ctx->stream_status[i].plane_count; j++)
- dc_plane_state_retain(
- dst_ctx->stream_status[i].plane_states[j]);
- }
-
- /* context refcount should not be overridden */
- dst_ctx->refcount = refcount;
-
-}
-
struct clock_source *dc_resource_find_first_free_pll(
struct resource_context *res_ctx,
const struct resource_pool *pool)
@@ -4773,7 +4452,7 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
option = DITHER_OPTION_SPATIAL8;
break;
case COLOR_DEPTH_101010:
- option = DITHER_OPTION_SPATIAL10;
+ option = DITHER_OPTION_TRUN10;
break;
default:
option = DITHER_OPTION_DISABLE;
@@ -4799,6 +4478,8 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ if (option == DITHER_OPTION_TRUN10)
+ fmt_bit_depth->flags.TRUNCATE_MODE = 1;
}
/* special case - Formatter can only reduce by 4 bits at most.
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
new file mode 100644
index 000000000000..88c6436b28b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -0,0 +1,867 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "core_types.h"
+#include "core_status.h"
+#include "dc_state.h"
+#include "dc_state_priv.h"
+#include "dc_stream_priv.h"
+#include "dc_plane_priv.h"
+
+#include "dm_services.h"
+#include "resource.h"
+#include "link_enc_cfg.h"
+
+#include "dml2/dml2_wrapper.h"
+#include "dml2/dml2_internal_types.h"
+
+#define DC_LOGGER \
+ dc->ctx->logger
+#define DC_LOGGER_INIT(logger)
+
+/* Private dc_state helper functions */
+static bool dc_state_track_phantom_stream(struct dc_state *state,
+ struct dc_stream_state *phantom_stream)
+{
+ if (state->phantom_stream_count >= MAX_PHANTOM_PIPES)
+ return false;
+
+ state->phantom_streams[state->phantom_stream_count++] = phantom_stream;
+
+ return true;
+}
+
+static bool dc_state_untrack_phantom_stream(struct dc_state *state, struct dc_stream_state *phantom_stream)
+{
+ bool res = false;
+ int i;
+
+ /* first find phantom stream in the dc_state */
+ for (i = 0; i < state->phantom_stream_count; i++) {
+ if (state->phantom_streams[i] == phantom_stream) {
+ state->phantom_streams[i] = NULL;
+ res = true;
+ break;
+ }
+ }
+
+ /* failed to find stream in state */
+ if (!res)
+ return res;
+
+ /* trim back phantom streams */
+ state->phantom_stream_count--;
+ for (; i < state->phantom_stream_count; i++)
+ state->phantom_streams[i] = state->phantom_streams[i + 1];
+
+ return res;
+}
+
+static bool dc_state_is_phantom_stream_tracked(struct dc_state *state, struct dc_stream_state *phantom_stream)
+{
+ int i;
+
+ for (i = 0; i < state->phantom_stream_count; i++) {
+ if (state->phantom_streams[i] == phantom_stream)
+ return true;
+ }
+
+ return false;
+}
+
+static bool dc_state_track_phantom_plane(struct dc_state *state,
+ struct dc_plane_state *phantom_plane)
+{
+ if (state->phantom_plane_count >= MAX_PHANTOM_PIPES)
+ return false;
+
+ state->phantom_planes[state->phantom_plane_count++] = phantom_plane;
+
+ return true;
+}
+
+static bool dc_state_untrack_phantom_plane(struct dc_state *state, struct dc_plane_state *phantom_plane)
+{
+ bool res = false;
+ int i;
+
+ /* first find phantom plane in the dc_state */
+ for (i = 0; i < state->phantom_plane_count; i++) {
+ if (state->phantom_planes[i] == phantom_plane) {
+ state->phantom_planes[i] = NULL;
+ res = true;
+ break;
+ }
+ }
+
+ /* failed to find plane in state */
+ if (!res)
+ return res;
+
+ /* trim back phantom planes */
+ state->phantom_plane_count--;
+ for (; i < state->phantom_plane_count; i++)
+ state->phantom_planes[i] = state->phantom_planes[i + 1];
+
+ return res;
+}
+
+static bool dc_state_is_phantom_plane_tracked(struct dc_state *state, struct dc_plane_state *phantom_plane)
+{
+ int i;
+
+ for (i = 0; i < state->phantom_plane_count; i++) {
+ if (state->phantom_planes[i] == phantom_plane)
+ return true;
+ }
+
+ return false;
+}
+
+static void dc_state_copy_internal(struct dc_state *dst_state, struct dc_state *src_state)
+{
+ int i, j;
+
+ memcpy(dst_state, src_state, sizeof(struct dc_state));
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *cur_pipe = &dst_state->res_ctx.pipe_ctx[i];
+
+ if (cur_pipe->top_pipe)
+ cur_pipe->top_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+ if (cur_pipe->bottom_pipe)
+ cur_pipe->bottom_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+
+ if (cur_pipe->prev_odm_pipe)
+ cur_pipe->prev_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
+
+ if (cur_pipe->next_odm_pipe)
+ cur_pipe->next_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
+ }
+
+ /* retain phantoms */
+ for (i = 0; i < dst_state->phantom_stream_count; i++)
+ dc_stream_retain(dst_state->phantom_streams[i]);
+
+ for (i = 0; i < dst_state->phantom_plane_count; i++)
+ dc_plane_state_retain(dst_state->phantom_planes[i]);
+
+ /* retain streams and planes */
+ for (i = 0; i < dst_state->stream_count; i++) {
+ dc_stream_retain(dst_state->streams[i]);
+ for (j = 0; j < dst_state->stream_status[i].plane_count; j++)
+ dc_plane_state_retain(
+ dst_state->stream_status[i].plane_states[j]);
+ }
+
+}
+
+static void init_state(struct dc *dc, struct dc_state *state)
+{
+	/* Each context must have its own instance of VBA, and in order to
+	 * initialize and obtain IP and SOC, the base DML instance from DC is
+	 * initially copied into every context.
+ */
+ memcpy(&state->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
+}
+
+/* Public dc_state functions */
+struct dc_state *dc_state_create(struct dc *dc)
+{
+ struct dc_state *state = kvzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+
+ if (!state)
+ return NULL;
+
+ init_state(dc, state);
+ dc_state_construct(dc, state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (dc->debug.using_dml2)
+ dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2);
+#endif
+
+ kref_init(&state->refcount);
+
+ return state;
+}
+
+void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state)
+{
+ struct kref refcount = dst_state->refcount;
+#ifdef CONFIG_DRM_AMD_DC_FP
+ struct dml2_context *dst_dml2 = dst_state->bw_ctx.dml2;
+#endif
+
+ dc_state_copy_internal(dst_state, src_state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dst_state->bw_ctx.dml2 = dst_dml2;
+ if (src_state->bw_ctx.dml2)
+ dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2);
+#endif
+
+ /* context refcount should not be overridden */
+ dst_state->refcount = refcount;
+}
+
+struct dc_state *dc_state_create_copy(struct dc_state *src_state)
+{
+ struct dc_state *new_state;
+
+ new_state = kvmalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+ if (!new_state)
+ return NULL;
+
+ dc_state_copy_internal(new_state, src_state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (src_state->bw_ctx.dml2 &&
+ !dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) {
+ dc_state_release(new_state);
+ return NULL;
+ }
+#endif
+
+ kref_init(&new_state->refcount);
+
+ return new_state;
+}
+
+void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state)
+{
+ dc_state_copy(dst_state, dc->current_state);
+}
+
+struct dc_state *dc_state_create_current_copy(struct dc *dc)
+{
+ return dc_state_create_copy(dc->current_state);
+}
+
+void dc_state_construct(struct dc *dc, struct dc_state *state)
+{
+ state->clk_mgr = dc->clk_mgr;
+
+ /* Initialise DIG link encoder resource tracking variables. */
+ if (dc->res_pool)
+ link_enc_cfg_init(dc, state);
+}
+
+void dc_state_destruct(struct dc_state *state)
+{
+ int i, j;
+
+ for (i = 0; i < state->stream_count; i++) {
+ for (j = 0; j < state->stream_status[i].plane_count; j++)
+ dc_plane_state_release(
+ state->stream_status[i].plane_states[j]);
+
+ state->stream_status[i].plane_count = 0;
+ dc_stream_release(state->streams[i]);
+ state->streams[i] = NULL;
+ }
+ state->stream_count = 0;
+
+ /* release tracked phantoms */
+ for (i = 0; i < state->phantom_stream_count; i++) {
+ dc_stream_release(state->phantom_streams[i]);
+ state->phantom_streams[i] = NULL;
+ }
+
+ for (i = 0; i < state->phantom_plane_count; i++) {
+ dc_plane_state_release(state->phantom_planes[i]);
+ state->phantom_planes[i] = NULL;
+ }
+ state->stream_mask = 0;
+ memset(&state->res_ctx, 0, sizeof(state->res_ctx));
+ memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg));
+ memset(&state->dcn_bw_vars, 0, sizeof(state->dcn_bw_vars));
+ state->clk_mgr = NULL;
+ memset(&state->bw_ctx.bw, 0, sizeof(state->bw_ctx.bw));
+ memset(state->block_sequence, 0, sizeof(state->block_sequence));
+ state->block_sequence_steps = 0;
+ memset(state->dc_dmub_cmd, 0, sizeof(state->dc_dmub_cmd));
+ state->dmub_cmd_count = 0;
+ memset(&state->perf_params, 0, sizeof(state->perf_params));
+ memset(&state->scratch, 0, sizeof(state->scratch));
+}
+
+void dc_state_retain(struct dc_state *state)
+{
+ kref_get(&state->refcount);
+}
+
+static void dc_state_free(struct kref *kref)
+{
+ struct dc_state *state = container_of(kref, struct dc_state, refcount);
+
+ dc_state_destruct(state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dml2_destroy(state->bw_ctx.dml2);
+ state->bw_ctx.dml2 = 0;
+#endif
+
+ kvfree(state);
+}
+
+void dc_state_release(struct dc_state *state)
+{
+ kref_put(&state->refcount, dc_state_free);
+}
+/*
+ * dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
+ */
+enum dc_status dc_state_add_stream(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ enum dc_status res;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (state->stream_count >= dc->res_pool->timing_generator_count) {
+ DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ state->streams[state->stream_count] = stream;
+ dc_stream_retain(stream);
+ state->stream_count++;
+
+ res = resource_add_otg_master_for_stream_output(
+ state, dc->res_pool, stream);
+ if (res != DC_OK)
+ DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);
+
+ return res;
+}
+
+/*
+ * dc_state_remove_stream() - Remove a stream from a dc_state.
+ */
+enum dc_status dc_state_remove_stream(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(
+ &state->res_ctx, stream);
+
+ if (!del_pipe) {
+ dm_error("Pipe not found for stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ resource_update_pipes_for_stream_with_slice_count(state,
+ dc->current_state, dc->res_pool, stream, 1);
+ resource_remove_otg_master_for_stream_output(
+ state, dc->res_pool, stream);
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == stream)
+ break;
+
+ if (state->streams[i] != stream) {
+ dm_error("Context doesn't have stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ dc_stream_release(state->streams[i]);
+ state->stream_count--;
+
+ /* Trim back arrays */
+ for (; i < state->stream_count; i++) {
+ state->streams[i] = state->streams[i + 1];
+ state->stream_status[i] = state->stream_status[i + 1];
+ }
+
+ state->streams[state->stream_count] = NULL;
+ memset(
+ &state->stream_status[state->stream_count],
+ 0,
+ sizeof(state->stream_status[0]));
+
+ return DC_OK;
+}
+
+bool dc_state_add_plane(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *state)
+{
+ struct resource_pool *pool = dc->res_pool;
+ struct pipe_ctx *otg_master_pipe;
+ struct dc_stream_status *stream_status = NULL;
+ bool added = false;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+ if (stream_status == NULL) {
+ dm_error("Existing stream not found; failed to attach surface!\n");
+ goto out;
+ } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
+ dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
+ plane_state, MAX_SURFACE_NUM);
+ goto out;
+ }
+
+ otg_master_pipe = resource_get_otg_master_for_stream(
+ &state->res_ctx, stream);
+ if (otg_master_pipe)
+ added = resource_append_dpp_pipes_for_plane_composition(state,
+ dc->current_state, pool, otg_master_pipe, plane_state);
+
+ if (added) {
+ stream_status->plane_states[stream_status->plane_count] =
+ plane_state;
+ stream_status->plane_count++;
+ dc_plane_state_retain(plane_state);
+ }
+
+out:
+ return added;
+}
+
+bool dc_state_remove_plane(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *state)
+{
+ int i;
+ struct dc_stream_status *stream_status = NULL;
+ struct resource_pool *pool = dc->res_pool;
+
+ if (!plane_state)
+ return true;
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == stream) {
+ stream_status = &state->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream not found; failed to remove plane.\n");
+ return false;
+ }
+
+ resource_remove_dpp_pipes_for_plane_composition(
+ state, pool, plane_state);
+
+ for (i = 0; i < stream_status->plane_count; i++) {
+ if (stream_status->plane_states[i] == plane_state) {
+ dc_plane_state_release(stream_status->plane_states[i]);
+ break;
+ }
+ }
+
+ if (i == stream_status->plane_count) {
+ dm_error("Existing plane_state not found; failed to detach it!\n");
+ return false;
+ }
+
+ stream_status->plane_count--;
+
+ /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */
+ for (; i < stream_status->plane_count; i++)
+ stream_status->plane_states[i] = stream_status->plane_states[i + 1];
+
+ stream_status->plane_states[stream_status->plane_count] = NULL;
+
+ if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
+		/* ODM combine could prevent us from supporting more planes, so
+		 * we reset the ODM slice count back to 1 once all planes have
+		 * been removed, to maximize the number of planes supported when
+		 * new planes are added.
+ */
+ resource_update_pipes_for_stream_with_slice_count(
+ state, dc->current_state, dc->res_pool, stream, 1);
+
+ return true;
+}
+
+/**
+ * dc_state_rem_all_planes_for_stream - Remove planes attached to the target stream.
+ *
+ * @dc: Pointer to the current dc instance.
+ * @stream: Target stream from which we want to remove the attached planes.
+ * @state: Context from which the planes are to be removed.
+ *
+ * Return:
+ * Return true if DC was able to remove all planes from the target
+ * stream, otherwise, return false.
+ */
+bool dc_state_rem_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ int i, old_plane_count;
+ struct dc_stream_status *stream_status = NULL;
+ struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == stream) {
+ stream_status = &state->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream %p not found!\n", stream);
+ return false;
+ }
+
+ old_plane_count = stream_status->plane_count;
+
+ for (i = 0; i < old_plane_count; i++)
+ del_planes[i] = stream_status->plane_states[i];
+
+ for (i = 0; i < old_plane_count; i++)
+ if (!dc_state_remove_plane(dc, stream, del_planes[i], state))
+ return false;
+
+ return true;
+}
+
+bool dc_state_add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state * const *plane_states,
+ int plane_count,
+ struct dc_state *state)
+{
+ int i;
+ bool result = true;
+
+ for (i = 0; i < plane_count; i++)
+ if (!dc_state_add_plane(dc, stream, plane_states[i], state)) {
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+/* Private dc_state functions */
+
+/**
+ * dc_state_get_stream_status - Get stream status from given dc state
+ * @state: DC state to find the stream status in
+ * @stream: The stream to get the stream status for
+ *
+ * The given stream is expected to exist in the given dc state. Otherwise, NULL
+ * will be returned.
+ */
+struct dc_stream_status *dc_state_get_stream_status(
+ struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ uint8_t i;
+
+ if (state == NULL)
+ return NULL;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (stream == state->streams[i])
+ return &state->stream_status[i];
+ }
+
+ return NULL;
+}
+
+enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx)
+{
+ return dc_state_get_stream_subvp_type(state, pipe_ctx->stream);
+}
+
+enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ int i;
+
+ enum mall_stream_type type = SUBVP_NONE;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (state->streams[i] == stream) {
+ type = state->stream_status[i].mall_stream_config.type;
+ break;
+ }
+ }
+
+ return type;
+}
+
+struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ int i;
+
+ struct dc_stream_state *paired_stream = NULL;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (state->streams[i] == stream) {
+ paired_stream = state->stream_status[i].mall_stream_config.paired_stream;
+ break;
+ }
+ }
+
+ return paired_stream;
+}
+
+struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *main_stream)
+{
+ struct dc_stream_state *phantom_stream;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ phantom_stream = dc_create_stream_for_sink(main_stream->sink);
+
+ if (!phantom_stream) {
+ DC_LOG_ERROR("Failed to allocate phantom stream.\n");
+ return NULL;
+ }
+
+ /* track phantom stream in dc_state */
+ dc_state_track_phantom_stream(state, phantom_stream);
+
+ phantom_stream->is_phantom = true;
+ phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
+ phantom_stream->dpms_off = true;
+
+ return phantom_stream;
+}
+
+void dc_state_release_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream)
+{
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (!dc_state_untrack_phantom_stream(state, phantom_stream)) {
+ DC_LOG_ERROR("Failed to free phantom stream %p in dc state %p.\n", phantom_stream, state);
+ return;
+ }
+
+ dc_stream_release(phantom_stream);
+}
+
+struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *main_plane)
+{
+ struct dc_plane_state *phantom_plane = dc_create_plane_state(dc);
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (!phantom_plane) {
+ DC_LOG_ERROR("Failed to allocate phantom plane.\n");
+ return NULL;
+ }
+
+ /* track phantom inside dc_state */
+ dc_state_track_phantom_plane(state, phantom_plane);
+
+ phantom_plane->is_phantom = true;
+
+ return phantom_plane;
+}
+
+void dc_state_release_phantom_plane(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *phantom_plane)
+{
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (!dc_state_untrack_phantom_plane(state, phantom_plane)) {
+ DC_LOG_ERROR("Failed to free phantom plane %p in dc state %p.\n", phantom_plane, state);
+ return;
+ }
+
+ dc_plane_state_release(phantom_plane);
+}
+
+/* add a phantom stream to the context and set up the correct SubVP metadata inside dc_state */
+enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream,
+ struct dc_stream_state *main_stream)
+{
+ struct dc_stream_status *main_stream_status;
+ struct dc_stream_status *phantom_stream_status;
+ enum dc_status res = dc_state_add_stream(dc, state, phantom_stream);
+
+ /* check if stream is tracked */
+ if (res == DC_OK && !dc_state_is_phantom_stream_tracked(state, phantom_stream)) {
+ /* stream must be tracked if added to state */
+ dc_state_track_phantom_stream(state, phantom_stream);
+ }
+
+ /* setup subvp meta */
+ main_stream_status = dc_state_get_stream_status(state, main_stream);
+ phantom_stream_status = dc_state_get_stream_status(state, phantom_stream);
+ phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM;
+ phantom_stream_status->mall_stream_config.paired_stream = main_stream;
+ main_stream_status->mall_stream_config.type = SUBVP_MAIN;
+ main_stream_status->mall_stream_config.paired_stream = phantom_stream;
+
+ return res;
+}
+
+enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream)
+{
+ struct dc_stream_status *main_stream_status;
+ struct dc_stream_status *phantom_stream_status;
+
+ /* reset subvp meta */
+ phantom_stream_status = dc_state_get_stream_status(state, phantom_stream);
+ main_stream_status = dc_state_get_stream_status(state, phantom_stream_status->mall_stream_config.paired_stream);
+ phantom_stream_status->mall_stream_config.type = SUBVP_NONE;
+ phantom_stream_status->mall_stream_config.paired_stream = NULL;
+ if (main_stream_status) {
+ main_stream_status->mall_stream_config.type = SUBVP_NONE;
+ main_stream_status->mall_stream_config.paired_stream = NULL;
+ }
+
+ /* remove stream from state */
+ return dc_state_remove_stream(dc, state, phantom_stream);
+}
+
+bool dc_state_add_phantom_plane(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state *phantom_plane,
+ struct dc_state *state)
+{
+ bool res = dc_state_add_plane(dc, phantom_stream, phantom_plane, state);
+
+	/* check if plane is tracked */
+ if (res && !dc_state_is_phantom_plane_tracked(state, phantom_plane)) {
+		/* plane must be tracked if added to state */
+ dc_state_track_phantom_plane(state, phantom_plane);
+ }
+
+ return res;
+}
+
+bool dc_state_remove_phantom_plane(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state *phantom_plane,
+ struct dc_state *state)
+{
+ return dc_state_remove_plane(dc, phantom_stream, phantom_plane, state);
+}
+
+bool dc_state_rem_all_phantom_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_state *state,
+ bool should_release_planes)
+{
+ int i, old_plane_count;
+ struct dc_stream_status *stream_status = NULL;
+ struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == phantom_stream) {
+ stream_status = &state->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream %p not found!\n", phantom_stream);
+ return false;
+ }
+
+ old_plane_count = stream_status->plane_count;
+
+ for (i = 0; i < old_plane_count; i++)
+ del_planes[i] = stream_status->plane_states[i];
+
+ for (i = 0; i < old_plane_count; i++) {
+ if (!dc_state_remove_plane(dc, phantom_stream, del_planes[i], state))
+ return false;
+ if (should_release_planes)
+ dc_state_release_phantom_plane(dc, state, del_planes[i]);
+ }
+
+ return true;
+}
+
+bool dc_state_add_all_phantom_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state * const *phantom_planes,
+ int plane_count,
+ struct dc_state *state)
+{
+ return dc_state_add_all_planes_for_stream(dc, phantom_stream, phantom_planes, plane_count, state);
+}
+
+bool dc_state_remove_phantom_streams_and_planes(
+ struct dc *dc,
+ struct dc_state *state)
+{
+ int i;
+ bool removed_phantom = false;
+ struct dc_stream_state *phantom_stream = NULL;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && pipe->stream && dc_state_get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) {
+ phantom_stream = pipe->stream;
+
+ dc_state_rem_all_phantom_planes_for_stream(dc, phantom_stream, state, false);
+ dc_state_remove_phantom_stream(dc, state, phantom_stream);
+ removed_phantom = true;
+ }
+ }
+ return removed_phantom;
+}
+
+void dc_state_release_phantom_streams_and_planes(
+ struct dc *dc,
+ struct dc_state *state)
+{
+ int i;
+
+ for (i = 0; i < state->phantom_stream_count; i++)
+ dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]);
+
+ for (i = 0; i < state->phantom_plane_count; i++)
+ dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 4bdf105d1d71..54670e0b1518 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -31,6 +31,8 @@
#include "ipp.h"
#include "timing_generator.h"
#include "dc_dmub_srv.h"
+#include "dc_state_priv.h"
+#include "dc_stream_priv.h"
#define DC_LOGGER dc->ctx->logger
@@ -54,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink)
}
}
-static bool dc_stream_construct(struct dc_stream_state *stream,
+bool dc_stream_construct(struct dc_stream_state *stream,
struct dc_sink *dc_sink_data)
{
uint32_t i = 0;
@@ -121,13 +123,12 @@ static bool dc_stream_construct(struct dc_stream_state *stream,
}
stream->out_transfer_func->type = TF_TYPE_BYPASS;
- stream->stream_id = stream->ctx->dc_stream_id_count;
- stream->ctx->dc_stream_id_count++;
+ dc_stream_assign_stream_id(stream);
return true;
}
-static void dc_stream_destruct(struct dc_stream_state *stream)
+void dc_stream_destruct(struct dc_stream_state *stream)
{
dc_sink_release(stream->sink);
if (stream->out_transfer_func != NULL) {
@@ -136,6 +137,13 @@ static void dc_stream_destruct(struct dc_stream_state *stream)
}
}
+void dc_stream_assign_stream_id(struct dc_stream_state *stream)
+{
+ /* MSB is reserved to indicate phantoms */
+ stream->stream_id = stream->ctx->dc_stream_id_count;
+ stream->ctx->dc_stream_id_count++;
+}
+
void dc_stream_retain(struct dc_stream_state *stream)
{
kref_get(&stream->refcount);
@@ -196,8 +204,7 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
if (new_stream->out_transfer_func)
dc_transfer_func_retain(new_stream->out_transfer_func);
- new_stream->stream_id = new_stream->ctx->dc_stream_id_count;
- new_stream->ctx->dc_stream_id_count++;
+ dc_stream_assign_stream_id(new_stream);
/* If using dynamic encoder assignment, wait till stream committed to assign encoder. */
if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign)
@@ -209,31 +216,6 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
}
/**
- * dc_stream_get_status_from_state - Get stream status from given dc state
- * @state: DC state to find the stream status in
- * @stream: The stream to get the stream status for
- *
- * The given stream is expected to exist in the given dc state. Otherwise, NULL
- * will be returned.
- */
-struct dc_stream_status *dc_stream_get_status_from_state(
- struct dc_state *state,
- struct dc_stream_state *stream)
-{
- uint8_t i;
-
- if (state == NULL)
- return NULL;
-
- for (i = 0; i < state->stream_count; i++) {
- if (stream == state->streams[i])
- return &state->stream_status[i];
- }
-
- return NULL;
-}
-
-/**
* dc_stream_get_status() - Get current stream status of the given stream state
* @stream: The stream to get the stream status for.
*
@@ -244,7 +226,7 @@ struct dc_stream_status *dc_stream_get_status(
struct dc_stream_state *stream)
{
struct dc *dc = stream->ctx->dc;
- return dc_stream_get_status_from_state(dc->current_state, stream);
+ return dc_state_get_stream_status(dc->current_state, stream);
}
static void program_cursor_attributes(
@@ -465,16 +447,37 @@ bool dc_stream_add_writeback(struct dc *dc,
if (dc->hwss.enable_writeback) {
struct dc_stream_status *stream_status = dc_stream_get_status(stream);
struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
- dwb->otg_inst = stream_status->primary_otg_inst;
+ if (stream_status)
+ dwb->otg_inst = stream_status->primary_otg_inst;
}
+
+ if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+ dm_error("DC: update_bandwidth failed!\n");
+ return false;
+ }
+
+ /* enable writeback */
+ if (dc->hwss.enable_writeback) {
+ struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* writeback pipe already enabled, only need to update */
+ dc->hwss.update_writeback(dc, wb_info, dc->current_state);
+ } else {
+			/* Enable writeback pipe from scratch */
+ dc->hwss.enable_writeback(dc, wb_info, dc->current_state);
+ }
+ }
+
return true;
}
-bool dc_stream_remove_writeback(struct dc *dc,
+bool dc_stream_fc_disable_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst)
{
- int i = 0, j = 0;
+ struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst];
+
if (stream == NULL) {
dm_error("DC: dc_stream is NULL!\n");
return false;
@@ -490,27 +493,63 @@ bool dc_stream_remove_writeback(struct dc *dc,
return false;
}
-// stream->writeback_info[dwb_pipe_inst].wb_enabled = false;
- for (i = 0; i < stream->num_wb_info; i++) {
- /*dynamic update*/
- if (stream->writeback_info[i].wb_enabled &&
- stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) {
- stream->writeback_info[i].wb_enabled = false;
- }
+ if (dwb->funcs->set_fc_enable)
+ dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE);
+
+ return true;
+}
+
+bool dc_stream_remove_writeback(struct dc *dc,
+ struct dc_stream_state *stream,
+ uint32_t dwb_pipe_inst)
+{
+ int i = 0, j = 0;
+ if (stream == NULL) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
+
+ if (dwb_pipe_inst >= MAX_DWB_PIPES) {
+ dm_error("DC: writeback pipe is invalid!\n");
+ return false;
+ }
+
+ if (stream->num_wb_info > MAX_DWB_PIPES) {
+ dm_error("DC: num_wb_info is invalid!\n");
+ return false;
}
/* remove writeback info for disabled writeback pipes from stream */
for (i = 0, j = 0; i < stream->num_wb_info; i++) {
if (stream->writeback_info[i].wb_enabled) {
- if (j < i)
- /* trim the array */
+
+ if (stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst)
+ stream->writeback_info[i].wb_enabled = false;
+
+ /* trim the array */
+ if (j < i) {
memcpy(&stream->writeback_info[j], &stream->writeback_info[i],
sizeof(struct dc_writeback_info));
- j++;
+ j++;
+ }
}
}
stream->num_wb_info = j;
+ /* recalculate and apply DML parameters */
+ if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+ dm_error("DC: update_bandwidth failed!\n");
+ return false;
+ }
+
+ /* disable writeback */
+ if (dc->hwss.disable_writeback) {
+ struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb))
+ dc->hwss.disable_writeback(dc, dwb_pipe_inst);
+ }
+
return true;
}
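The split writeback helpers above pair roughly as follows in a caller (sketch only; wb_info population, locking and the surrounding commit sequence are assumed to happen in the DM layer):

	/* Sketch: tear down one writeback pipe using the split helpers. */
	static void example_stop_writeback(struct dc *dc,
			struct dc_stream_state *stream,
			uint32_t dwb_pipe_inst)
	{
		/* Stop frame capture first, then update SW state and disable HW. */
		if (dc_stream_fc_disable_writeback(dc, stream, dwb_pipe_inst))
			dc_stream_remove_writeback(dc, stream, dwb_pipe_inst);
	}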
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index a80e45300783..19a2c7140ae8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -32,10 +32,12 @@
#include "transform.h"
#include "dpp.h"
+#include "dc_plane_priv.h"
+
/*******************************************************************************
* Private functions
******************************************************************************/
-static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
+void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
{
plane_state->ctx = ctx;
@@ -63,7 +65,7 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl
}
-static void dc_plane_destruct(struct dc_plane_state *plane_state)
+void dc_plane_destruct(struct dc_plane_state *plane_state)
{
if (plane_state->gamma_correction != NULL) {
dc_gamma_release(&plane_state->gamma_correction);
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index e8c1599ab18a..5d7aa882416b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -27,6 +27,8 @@
#define DC_INTERFACE_H_
#include "dc_types.h"
+#include "dc_state.h"
+#include "dc_plane.h"
#include "grph_object_defs.h"
#include "logger_types.h"
#include "hdcp_msg_types.h"
@@ -49,7 +51,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.262"
+#define DC_VER "3.2.266"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -461,6 +463,12 @@ enum dml_hostvm_override_opts {
DML_HOSTVM_OVERRIDE_TRUE = 0x2,
};
+enum dc_replay_power_opts {
+ replay_power_opt_invalid = 0x0,
+ replay_power_opt_smu_opt_static_screen = 0x1,
+ replay_power_opt_z10_static_screen = 0x10,
+};
+
enum dcc_option {
DCC_ENABLE = 0,
DCC_DISABLE = 1,
@@ -979,6 +987,8 @@ struct dc_debug_options {
unsigned int ips2_eval_delay_us;
unsigned int ips2_entry_delay_us;
bool disable_timeout;
+ bool disable_extblankadj;
+ unsigned int static_screen_wait_frames;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -1389,13 +1399,6 @@ struct dc_surface_update {
/*
* Create a new surface with default parameters;
*/
-struct dc_plane_state *dc_create_plane_state(struct dc *dc);
-const struct dc_plane_status *dc_plane_get_status(
- const struct dc_plane_state *plane_state);
-
-void dc_plane_state_retain(struct dc_plane_state *plane_state);
-void dc_plane_state_release(struct dc_plane_state *plane_state);
-
void dc_gamma_retain(struct dc_gamma *dc_gamma);
void dc_gamma_release(struct dc_gamma **dc_gamma);
struct dc_gamma *dc_create_gamma(void);
@@ -1459,37 +1462,20 @@ enum dc_status dc_validate_global_state(
struct dc_state *new_ctx,
bool fast_validate);
-
-void dc_resource_state_construct(
- const struct dc *dc,
- struct dc_state *dst_ctx);
-
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
struct dc_stream_state *stream,
struct dc_3dlut **lut,
struct dc_transfer_func **shaper);
-void dc_resource_state_copy_construct(
- const struct dc_state *src_ctx,
- struct dc_state *dst_ctx);
-
-void dc_resource_state_copy_construct_current(
- const struct dc *dc,
- struct dc_state *dst_ctx);
-
-void dc_resource_state_destruct(struct dc_state *context);
-
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
+void get_audio_check(struct audio_info *aud_modes,
+ struct audio_check *aud_chk);
enum dc_status dc_commit_streams(struct dc *dc,
struct dc_stream_state *streams[],
uint8_t stream_count);
-struct dc_state *dc_create_state(struct dc *dc);
-struct dc_state *dc_copy_state(struct dc_state *src_ctx);
-void dc_retain_state(struct dc_state *context);
-void dc_release_state(struct dc_state *context);
struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
struct dc_stream_state *stream,
@@ -1541,7 +1527,13 @@ struct dc_link {
bool is_dig_mapping_flexible;
bool hpd_status; /* HPD status of link without physical HPD pin. */
bool is_hpd_pending; /* Indicates a new received hpd */
- bool is_automated; /* Indicates automated testing */
+
+ /* USB4 DPIA links skip verifying link cap, instead performing the fallback method
+ * for every link training. This is incompatible with DP LL compliance automation,
+ * which expects the same link settings to be used every retry on a link loss.
+ * This flag is used to skip the fallback when link loss occurs during automation.
+ */
+ bool skip_fallback_on_link_loss;
bool edp_sink_present;
@@ -2092,6 +2084,20 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
const struct dc_stream_state *stream, struct psr_config *psr_config,
struct psr_context *psr_context);
+/*
+ * Communicate with DMUB to allow or disallow Panel Replay on the specified link:
+ *
+ * @link: pointer to the dc_link struct instance
+ * @enable: enable (active) or disable (inactive) Replay
+ * @wait: whether to wait until the active-state transition has completed
+ * @force_static: force Replay into the static (inactive) state
+ * @power_opts: power optimization parameters to pass to DMUB
+ *
+ * return: true if the requested Replay active state was applied, else false.
+ */
+bool dc_link_set_replay_allow_active(struct dc_link *dc_link, const bool *enable,
+ bool wait, bool force_static, const unsigned int *power_opts);
+
bool dc_link_get_replay_state(const struct dc_link *dc_link, uint64_t *state);
/* On eDP links this function call will stall until T12 has elapsed.
@@ -2187,11 +2193,11 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
*
* @dc: pointer to dc struct
* @stream: pointer to all possible streams
- * @num_streams: number of valid DPIA streams
+ * @count: number of valid DPIA streams
*
* return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
*/
-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams,
+bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
const unsigned int count);
/* Sink Interfaces - A sink corresponds to a display output device */
@@ -2336,6 +2342,9 @@ void dc_hardware_release(struct dc *dc);
void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc);
bool dc_set_psr_allow_active(struct dc *dc, bool enable);
+
+bool dc_set_replay_allow_active(struct dc *dc, bool active);
+
void dc_z10_restore(const struct dc *dc);
void dc_z10_save_init(struct dc *dc);
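The state declarations dropped from dc.h above appear to map one-to-one onto the new dc_state interface; a hedged migration sketch (the mapping is inferred from this series, not stated in it):

	/* dc_create_state(dc)   ->  dc_state_create(dc)
	 * dc_copy_state(src)    ->  dc_state_create_copy(src)
	 * dc_retain_state(ctx)  ->  dc_state_retain(ctx)
	 * dc_release_state(ctx) ->  dc_state_release(ctx)
	 */
	struct dc_state *new_ctx = dc_state_create_current_copy(dc);

	if (new_ctx) {
		/* ... mutate the copy, e.g. dc_state_add_stream() ... */
		dc_state_release(new_ctx);
	}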
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index be9aa1a71847..26940d94d8fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -140,7 +140,7 @@ struct dc_vbios_funcs {
enum bp_result (*enable_lvtma_control)(
struct dc_bios *bios,
uint8_t uc_pwr_on,
- uint8_t panel_instance,
+ uint8_t pwrseq_instance,
uint8_t bypass_panel_control_wait);
enum bp_result (*get_soc_bb_info)(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 53400cc05b5b..2b79a0e5638e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -33,6 +33,7 @@
#include "cursor_reg_cache.h"
#include "resource.h"
#include "clk_mgr.h"
+#include "dc_state_priv.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -140,7 +141,10 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
if (status == DMUB_STATUS_QUEUE_FULL) {
/* Execute and wait for queue to become empty again. */
- dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_cmd_execute(dmub);
+ if (status == DMUB_STATUS_POWER_STATE_D3)
+ return false;
+
dmub_srv_wait_for_idle(dmub, 100000);
/* Requeue the command. */
@@ -148,16 +152,20 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
}
if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error queueing DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
return false;
}
}
status = dmub_srv_cmd_execute(dmub);
if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error starting DMUB execution: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
return false;
}
@@ -218,7 +226,10 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun
if (status == DMUB_STATUS_QUEUE_FULL) {
/* Execute and wait for queue to become empty again. */
- dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_cmd_execute(dmub);
+ if (status == DMUB_STATUS_POWER_STATE_D3)
+ return false;
+
dmub_srv_wait_for_idle(dmub, 100000);
/* Requeue the command. */
@@ -226,16 +237,20 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun
}
if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error queueing DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
return false;
}
}
status = dmub_srv_cmd_execute(dmub);
if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error starting DMUB execution: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
return false;
}
@@ -287,17 +302,11 @@ bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
unsigned int stream_mask)
{
- struct dmub_srv *dmub;
- const uint32_t timeout = 30;
-
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return false;
- dmub = dc_dmub_srv->dmub;
-
- return dmub_srv_send_gpint_command(
- dmub, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
- stream_mask, timeout) == DMUB_STATUS_OK;
+ return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
+ stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT);
}
bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
@@ -346,7 +355,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal
cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
// Send the command to the DMCUB.
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
@@ -360,7 +369,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
// Send the command to the DMCUB.
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
@@ -453,7 +462,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);
// Send the command to the DMCUB.
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -474,7 +483,7 @@ void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);
/* If command was processed, copy feature caps to dmub srv */
- if (dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+ if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
cmd.query_feature_caps.header.ret_status == 0) {
memcpy(&dc_dmub_srv->dmub->feature_caps,
&cmd.query_feature_caps.query_feature_caps_data,
@@ -499,7 +508,7 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;
// If command was processed, copy feature caps to dmub srv
- if (dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+ if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
cmd.visual_confirm_color.header.ret_status == 0) {
memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
&cmd.visual_confirm_color.visual_confirm_color_data,
@@ -510,10 +519,11 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
/**
* populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
*
- * @dc: [in] current dc state
+ * @dc: [in] pointer to dc object
+ * @context: [in] DC state for access to phantom stream
* @subvp_pipe: [in] pipe_ctx for the SubVP pipe
* @vblank_pipe: [in] pipe_ctx for the DRR pipe
* @pipe_data: [in] Pipe data which stores the VBLANK/DRR info
*
* Populate the DMCUB SubVP command with DRR pipe info. All the information
* required for calculating the SubVP + DRR microschedule is populated here.
@@ -524,12 +534,14 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
* 3. Populate the drr_info with the min and max supported vtotal values
*/
static void populate_subvp_cmd_drr_info(struct dc *dc,
+ struct dc_state *context,
struct pipe_ctx *subvp_pipe,
struct pipe_ctx *vblank_pipe,
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
{
+ struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
- struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
uint16_t drr_frame_us = 0;
uint16_t min_drr_supported_us = 0;
@@ -617,7 +629,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
continue;
// Find the SubVP pipe
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
break;
}
@@ -634,7 +646,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
if (vblank_pipe->stream->ignore_msa_timing_param &&
(vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
- populate_subvp_cmd_drr_info(dc, pipe, vblank_pipe, pipe_data);
+ populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
}
/**
@@ -659,10 +671,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
uint32_t subvp0_prefetch_us = 0;
uint32_t subvp1_prefetch_us = 0;
uint32_t prefetch_delta_us = 0;
- struct dc_crtc_timing *phantom_timing0 = &subvp_pipes[0]->stream->mall_stream_config.paired_stream->timing;
- struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing;
+ struct dc_stream_state *phantom_stream0 = NULL;
+ struct dc_stream_state *phantom_stream1 = NULL;
+ struct dc_crtc_timing *phantom_timing0 = NULL;
+ struct dc_crtc_timing *phantom_timing1 = NULL;
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
+ phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
+ phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
+ phantom_timing0 = &phantom_stream0->timing;
+ phantom_timing1 = &phantom_stream1->timing;
+
subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
(uint64_t)phantom_timing0->h_total * 1000000),
(((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
@@ -712,8 +731,9 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
uint32_t j;
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
+ struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
- struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;
pipe_data->mode = SUBVP;
@@ -767,7 +787,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
- if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) {
+ if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
if (phantom_pipe->bottom_pipe) {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
@@ -801,6 +821,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
union dmub_rb_cmd cmd;
struct pipe_ctx *subvp_pipes[2];
uint32_t wm_val_refclk = 0;
+ enum mall_stream_type pipe_mall_type;
memset(&cmd, 0, sizeof(cmd));
// FW command for SUBVP
@@ -816,7 +837,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
*/
if (resource_is_pipe_type(pipe, OTG_MASTER) &&
resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
subvp_pipes[subvp_count++] = pipe;
}
@@ -824,6 +845,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
if (!pipe->stream)
continue;
@@ -834,12 +856,11 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
*/
if (resource_is_pipe_type(pipe, OTG_MASTER) &&
resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->stream->mall_stream_config.paired_stream &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ pipe_mall_type == SUBVP_MAIN) {
populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
} else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ pipe_mall_type == SUBVP_NONE) {
// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
// we run through DML without calculating "natural" P-state support
populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
@@ -861,7 +882,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
}
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
@@ -1098,7 +1119,7 @@ void dc_send_update_cursor_info_to_dmu(
pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
/* Combine both update_cursor_info commands and send to DMUB */
- dm_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
}
@@ -1112,25 +1133,20 @@ bool dc_dmub_check_min_version(struct dmub_srv *srv)
void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
{
struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
- struct dmub_srv *dmub;
- enum dmub_status status;
- static const uint32_t timeout_us = 30;
if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
DC_LOG_ERROR("%s: invalid parameters.", __func__);
return;
}
- dmub = dc_dmub_srv->dmub;
-
- status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1, 0x0010, timeout_us);
- if (status != DMUB_STATUS_OK) {
+ if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
+ 0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
DC_LOG_ERROR("timeout updating trace buffer mask word\n");
return;
}
- status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK, 0x0000, timeout_us);
- if (status != DMUB_STATUS_OK) {
+ if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
+ 0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
DC_LOG_ERROR("timeout updating trace buffer mask word\n");
return;
}
@@ -1148,6 +1164,9 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dc_context *dc_ctx;
enum dmub_status status;
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return true;
+
+ dc_ctx = dc_dmub_srv->ctx;
+
if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
return true;
@@ -1169,7 +1188,7 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
return true;
}
-void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
+static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
union dmub_rb_cmd cmd = {0};
@@ -1190,20 +1209,20 @@ void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
dc->hwss.set_idle_state(dc, true);
}
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ /*
+ * NOTE: This does not use the "wake" interface since this is part of
+ * the wake path, and no wait is performed since DMCUB could enter
+ * idle again right after the notification.
+ */
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}
-void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
+static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
- const uint32_t max_num_polls = 10000;
uint32_t allow_state = 0;
uint32_t commit_state = 0;
- int i;
if (dc->debug.dmcub_emulation)
return;
- if (!dc->idle_optimizations_allowed)
+ if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
return;
if (dc->hwss.get_idle_state &&
@@ -1215,8 +1234,16 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
// Wait for evaluation time
- udelay(dc->debug.ips2_eval_delay_us);
- commit_state = dc->hwss.get_idle_state(dc);
+ for (;;) {
+ udelay(dc->debug.ips2_eval_delay_us);
+ commit_state = dc->hwss.get_idle_state(dc);
+ if (commit_state & DMUB_IPS2_ALLOW_MASK)
+ break;
+
+ /* allow was still set, retry eval delay */
+ dc->hwss.set_idle_state(dc, false);
+ }
+
if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
// Tell PMFW to exit low power state
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
@@ -1225,17 +1252,13 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
udelay(dc->debug.ips2_entry_delay_us);
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
- for (i = 0; i < max_num_polls; ++i) {
+ for (;;) {
commit_state = dc->hwss.get_idle_state(dc);
if (commit_state & DMUB_IPS2_COMMIT_MASK)
break;
udelay(1);
-
- if (dc->debug.disable_timeout)
- i--;
}
- ASSERT(i < max_num_polls);
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
@@ -1250,17 +1273,13 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
dc_dmub_srv_notify_idle(dc, false);
if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
- for (i = 0; i < max_num_polls; ++i) {
+ for (;;) {
commit_state = dc->hwss.get_idle_state(dc);
if (commit_state & DMUB_IPS1_COMMIT_MASK)
break;
udelay(1);
-
- if (dc->debug.disable_timeout)
- i--;
}
- ASSERT(i < max_num_polls);
}
}
@@ -1268,3 +1287,131 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
ASSERT(0);
}
+void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState)
+{
+ struct dmub_srv *dmub;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return;
+
+ dmub = dc_dmub_srv->dmub;
+
+ if (powerState == DC_ACPI_CM_POWER_STATE_D0)
+ dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
+ else
+ dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
+}
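For orientation, a hedged sketch of how a DM suspend/resume path might drive this helper (the call sites below are illustrative, not part of this patch):

	/* Suspend: tell the DMUB service the hardware is headed to D3. */
	dc_dmub_srv_set_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
	/* ... device suspends ... */
	/* Resume: restore D0 before issuing any further DMUB traffic. */
	dc_dmub_srv_set_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);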
+
+void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle)
+{
+ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return;
+
+ if (dc_dmub_srv->idle_allowed == allow_idle)
+ return;
+
+ /*
+ * Entering a low power state requires a driver notification.
+ * Powering up the hardware requires notifying PMFW and DMCUB.
+ * Clearing the driver's idle-allow state requires a DMCUB command, and
+ * DMCUB commands require the DMCUB to be powered up and restored.
+ *
+ * Exit out early to prevent an infinite loop of DMCUB commands
+ * triggering exit low power - use software state to track this.
+ */
+ dc_dmub_srv->idle_allowed = allow_idle;
+
+ if (!allow_idle)
+ dc_dmub_srv_exit_low_power_state(dc);
+ else
+ dc_dmub_srv_notify_idle(dc, allow_idle);
+}
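The early-out on idle_allowed is what breaks the potential command/wake recursion described above; a short sketch of the resulting caller-visible behavior:

	dc_dmub_srv_apply_idle_power_optimizations(dc, true);  /* notifies DMCUB it may idle */
	dc_dmub_srv_apply_idle_power_optimizations(dc, true);  /* no-op: software state unchanged */
	dc_dmub_srv_apply_idle_power_optimizations(dc, false); /* wakes DMCUB out of low power */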
+
+bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
+ enum dm_dmub_wait_type wait_type)
+{
+ return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type);
+}
+
+bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
+ union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
+ bool result = false, reallow_idle = false;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ if (count == 0)
+ return true;
+
+ if (dc_dmub_srv->idle_allowed) {
+ dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
+ reallow_idle = true;
+ }
+
+ /*
+ * These helpers may have different implementations in the DM layer,
+ * so route the call to whichever one matches the command count.
+ */
+ if (count > 1)
+ result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type);
+ else
+ result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
+
+ if (result && reallow_idle)
+ dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
+
+ return result;
+}
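A minimal caller sketch, assuming a valid struct dc pointer and a command populated the way the ABM paths later in this patch populate theirs:

	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.abm_set_level.header.type = DMUB_CMD__ABM;
	cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
	cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data);

	/* Wakes DMCUB if it was idle, submits, then re-enters idle on success. */
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);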
+
+static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
+ uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
+{
+ struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
+ const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30;
+ enum dmub_status status;
+
+ if (response)
+ *response = 0;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us);
+ if (status != DMUB_STATUS_OK) {
+ if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT)
+ return true;
+
+ return false;
+ }
+
+ if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+ dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response);
+
+ return true;
+}
+
+bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
+ uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
+{
+ struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
+ bool result = false, reallow_idle = false;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ if (dc_dmub_srv->idle_allowed) {
+ dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
+ reallow_idle = true;
+ }
+
+ result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
+
+ if (result && reallow_idle)
+ dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
+
+ return result;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index d4a60f53faab..952bfb368886 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -50,6 +50,8 @@ struct dc_dmub_srv {
struct dc_context *ctx;
void *dm;
+
+ bool idle_allowed;
};
void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
@@ -100,6 +102,59 @@ void dc_dmub_srv_enable_dpia_trace(const struct dc *dc);
void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index);
bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait);
-void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle);
-void dc_dmub_srv_exit_low_power_state(const struct dc *dc);
+
+void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle);
+
+void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState);
+
+/**
+ * dc_wake_and_execute_dmub_cmd() - Wrapper for DMUB command execution.
+ *
+ * Refer to dc_wake_and_execute_dmub_cmd_list() for usage and limitations.
+ * This function is a convenience wrapper for executing a single command.
+ *
+ * @ctx: DC context
+ * @cmd: The command to send/receive
+ * @wait_type: The wait behavior for the execution
+ *
+ * Return: true on command submission success, false otherwise
+ */
+bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
+ enum dm_dmub_wait_type wait_type);
+
+/**
+ * dc_wake_and_execute_dmub_cmd_list() - Wrapper for DMUB command list execution.
+ *
+ * If the DMCUB hardware was asleep, this wakes the DMCUB before
+ * executing the commands and attempts to re-enter idle afterwards
+ * if the command submission was successful.
+ *
+ * This should be the preferred command submission interface provided
+ * the DC lock is acquired.
+ *
+ * Otherwise, entry into and exit from idle power optimizations must
+ * be performed manually through dc_allow_idle_optimizations().
+ *
+ * @ctx: DC context
+ * @count: Number of commands to send/receive
+ * @cmd: Array of commands to send
+ * @wait_type: The wait behavior for the execution
+ *
+ * Return: true on command submission success, false otherwise
+ */
+bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
+ union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
+
+/**
+ * dc_wake_and_execute_gpint() - Wrapper for DMUB GPINT command execution.
+ *
+ * Wakes the DMCUB if it was idle, sends the GPINT command, and attempts
+ * to re-enter idle afterwards if the command was successful.
+ *
+ * @ctx: DC context
+ * @command_code: The command ID to send to DMCUB
+ * @param: The parameter to message DMCUB
+ * @response: Optional response out value - may be NULL.
+ * @wait_type: The wait behavior for the execution
+ *
+ * Return: true on command submission success, false otherwise
+ */
+bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
+ uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type);
+
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index eeeeeef4d717..1cb7765f593a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -1377,6 +1377,12 @@ struct dp_trace {
#ifndef DP_TUNNELING_STATUS
#define DP_TUNNELING_STATUS 0xE0025 /* 1.4a */
#endif
+#ifndef DP_TUNNELING_MAX_LINK_RATE
+#define DP_TUNNELING_MAX_LINK_RATE 0xE0028 /* 1.4a */
+#endif
+#ifndef DP_TUNNELING_MAX_LANE_COUNT
+#define DP_TUNNELING_MAX_LANE_COUNT 0xE0029 /* 1.4a */
+#endif
#ifndef DPTX_BW_ALLOCATION_MODE_CONTROL
#define DPTX_BW_ALLOCATION_MODE_CONTROL 0xE0030 /* 1.4a */
#endif
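These addresses index the DPCD; DC reads them through its own link-layer helpers, but a generic sketch using the DRM DP AUX helper conveys the access pattern (the helper choice here is illustrative, not what this patch wires up):

	#include <drm/display/drm_dp_helper.h>

	/* Illustrative: fetch the tunneling link caps defined above. */
	static int read_dp_tunneling_caps(struct drm_dp_aux *aux, u8 *rate, u8 *lanes)
	{
		if (drm_dp_dpcd_readb(aux, DP_TUNNELING_MAX_LINK_RATE, rate) < 0 ||
		    drm_dp_dpcd_readb(aux, DP_TUNNELING_MAX_LANE_COUNT, lanes) < 0)
			return -EIO;
		return 0;
	}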
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index cb6eaddab720..8f9a67825615 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -50,7 +50,7 @@ static inline void submit_dmub_read_modify_write(
cmd_buf->header.payload_bytes =
sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;
- dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
memset(cmd_buf, 0, sizeof(*cmd_buf));
@@ -67,7 +67,7 @@ static inline void submit_dmub_burst_write(
cmd_buf->header.payload_bytes =
sizeof(uint32_t) * offload->reg_seq_count;
- dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
memset(cmd_buf, 0, sizeof(*cmd_buf));
@@ -80,7 +80,7 @@ static inline void submit_dmub_reg_wait(
{
struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
- dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
memset(cmd_buf, 0, sizeof(*cmd_buf));
offload->reg_seq_count = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 9649934ea186..811474f4419b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -244,7 +244,7 @@ enum pixel_format {
#define DC_MAX_DIRTY_RECTS 3
struct dc_flip_addrs {
struct dc_plane_address address;
- unsigned int flip_timestamp_in_us;
+ unsigned long long flip_timestamp_in_us;
bool flip_immediate;
/* TODO: add flip duration for FreeSync */
bool triplebuffer_flips;
@@ -465,6 +465,7 @@ struct dc_cursor_mi_param {
struct fixed31_32 v_scale_ratio;
enum dc_rotation_angle rotation;
bool mirror;
+ struct dc_stream_state *stream;
};
/* IPP related types */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
new file mode 100644
index 000000000000..ef380cae816a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_PLANE_H_
+#define _DC_PLANE_H_
+
+#include "dc.h"
+#include "dc_hw_types.h"
+
+struct dc_plane_state *dc_create_plane_state(struct dc *dc);
+const struct dc_plane_status *dc_plane_get_status(
+ const struct dc_plane_state *plane_state);
+void dc_plane_state_retain(struct dc_plane_state *plane_state);
+void dc_plane_state_release(struct dc_plane_state *plane_state);
+
+#endif /* _DC_PLANE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
new file mode 100644
index 000000000000..9ee184c1df00
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_PLANE_PRIV_H_
+#define _DC_PLANE_PRIV_H_
+
+#include "dc_plane.h"
+
+void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state);
+void dc_plane_destruct(struct dc_plane_state *plane_state);
+
+#endif /* _DC_PLANE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h
new file mode 100644
index 000000000000..d167fdbfa8a9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_state.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STATE_H_
+#define _DC_STATE_H_
+
+#include "dc.h"
+#include "inc/core_status.h"
+
+struct dc_state *dc_state_create(struct dc *dc);
+void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state);
+struct dc_state *dc_state_create_copy(struct dc_state *src_state);
+void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state);
+struct dc_state *dc_state_create_current_copy(struct dc *dc);
+void dc_state_construct(struct dc *dc, struct dc_state *state);
+void dc_state_destruct(struct dc_state *state);
+void dc_state_retain(struct dc_state *state);
+void dc_state_release(struct dc_state *state);
+
+enum dc_status dc_state_add_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+
+enum dc_status dc_state_remove_stream(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+
+bool dc_state_add_plane(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *state);
+
+bool dc_state_remove_plane(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *state);
+
+bool dc_state_rem_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *state);
+
+bool dc_state_add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state * const *plane_states,
+ int plane_count,
+ struct dc_state *state);
+
+struct dc_stream_status *dc_state_get_stream_status(
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+#endif /* _DC_STATE_H_ */
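These entry points take over from the dc_add_*/dc_remove_* helpers removed from dc_stream.h later in this patch; a hedged lifecycle sketch, with error handling trimmed:

	struct dc_state *state = dc_state_create_current_copy(dc);

	if (state) {
		if (dc_state_add_stream(dc, state, stream) == DC_OK)
			dc_state_add_plane(dc, stream, plane_state, state);
		/* ... validate and commit the new state ... */
		dc_state_release(state);
	}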
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
new file mode 100644
index 000000000000..c1f44e09a6c1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STATE_PRIV_H_
+#define _DC_STATE_PRIV_H_
+
+#include "dc_state.h"
+#include "dc_stream.h"
+
+/* Get the type of the provided resource (none, phantom, main) based on the provided
+ * context. If the context is unavailable, determine only if phantom or not.
+ */
+enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx);
+enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state,
+ const struct dc_stream_state *stream);
+
+/* Gets the phantom stream if main is provided, or the main stream if phantom is provided. */
+struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state,
+ const struct dc_stream_state *stream);
+
+/* Allocates a phantom stream or plane and returns a pointer to the object */
+struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *main_stream);
+struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *main_plane);
+
+/* Deallocates a phantom stream or plane */
+void dc_state_release_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream);
+void dc_state_release_phantom_plane(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *phantom_plane);
+
+/* Add/remove a phantom stream to/from the context and generate SubVP metadata */
+enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream,
+ struct dc_stream_state *main_stream);
+enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream);
+
+bool dc_state_add_phantom_plane(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state *phantom_plane,
+ struct dc_state *state);
+
+bool dc_state_remove_phantom_plane(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state *phantom_plane,
+ struct dc_state *state);
+
+bool dc_state_rem_all_phantom_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_state *state,
+ bool should_release_planes);
+
+bool dc_state_add_all_phantom_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state * const *phantom_planes,
+ int plane_count,
+ struct dc_state *state);
+
+bool dc_state_remove_phantom_streams_and_planes(
+ struct dc *dc,
+ struct dc_state *state);
+
+void dc_state_release_phantom_streams_and_planes(
+ struct dc *dc,
+ struct dc_state *state);
+
+#endif /* _DC_STATE_PRIV_H_ */
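A short sketch mirroring the call sites converted earlier in this patch: classify a pipe through the state, then resolve its phantom counterpart instead of dereferencing stream->mall_stream_config directly:

	if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
		struct dc_stream_state *phantom =
			dc_state_get_paired_subvp_stream(context, pipe->stream);

		/* phantom->timing feeds the SubVP microschedule math. */
	}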
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e61eea6db29c..ee10941caa59 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -38,6 +38,14 @@ struct timing_sync_info {
bool master;
};
+struct mall_stream_config {
+ /* MALL stream config to indicate if the stream is phantom or not.
+ * We will use a phantom stream to indicate that the pipe is phantom.
+ */
+ enum mall_stream_type type;
+ struct dc_stream_state *paired_stream; // master / slave stream
+};
+
struct dc_stream_status {
int primary_otg_inst;
int stream_enc_inst;
@@ -50,6 +58,7 @@ struct dc_stream_status {
struct timing_sync_info timing_sync_info;
struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
bool is_abm_supported;
+ struct mall_stream_config mall_stream_config;
};
enum hubp_dmdata_mode {
@@ -147,31 +156,6 @@ struct test_pattern {
#define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR)
-enum mall_stream_type {
- SUBVP_NONE, // subvp not in use
- SUBVP_MAIN, // subvp in use, this stream is main stream
- SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream
-};
-
-struct mall_stream_config {
- /* MALL stream config to indicate if the stream is phantom or not.
- * We will use a phantom stream to indicate that the pipe is phantom.
- */
- enum mall_stream_type type;
- struct dc_stream_state *paired_stream; // master / slave stream
-};
-
-/* Temp struct used to save and restore MALL config
- * during validation.
- *
- * TODO: Move MALL config into dc_state instead of stream struct
- * to avoid needing to save/restore.
- */
-struct mall_temp_config {
- struct mall_stream_config mall_stream_config[MAX_PIPES];
- bool is_phantom_plane[MAX_PIPES];
-};
-
struct dc_stream_debug_options {
char force_odm_combine_segments;
};
@@ -301,7 +285,7 @@ struct dc_stream_state {
bool has_non_synchronizable_pclk;
bool vblank_synchronized;
bool fpo_in_use;
- struct mall_stream_config mall_stream_config;
+ bool is_phantom;
};
#define ABM_LEVEL_IMMEDIATE_DISABLE 255
@@ -415,45 +399,14 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
uint32_t *h_position,
uint32_t *v_position);
-enum dc_status dc_add_stream_to_ctx(
- struct dc *dc,
- struct dc_state *new_ctx,
- struct dc_stream_state *stream);
-
-enum dc_status dc_remove_stream_from_ctx(
- struct dc *dc,
- struct dc_state *new_ctx,
- struct dc_stream_state *stream);
-
-
-bool dc_add_plane_to_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state *plane_state,
- struct dc_state *context);
-
-bool dc_remove_plane_from_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state *plane_state,
- struct dc_state *context);
-
-bool dc_rem_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_state *context);
-
-bool dc_add_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state * const *plane_states,
- int plane_count,
- struct dc_state *context);
-
bool dc_stream_add_writeback(struct dc *dc,
struct dc_stream_state *stream,
struct dc_writeback_info *wb_info);
+bool dc_stream_fc_disable_writeback(struct dc *dc,
+ struct dc_stream_state *stream,
+ uint32_t dwb_pipe_inst);
+
bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst);
@@ -514,9 +467,6 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
void dc_stream_retain(struct dc_stream_state *dc_stream);
void dc_stream_release(struct dc_stream_state *dc_stream);
-struct dc_stream_status *dc_stream_get_status_from_state(
- struct dc_state *state,
- struct dc_stream_state *stream);
struct dc_stream_status *dc_stream_get_status(
struct dc_stream_state *dc_stream);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h b/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h
new file mode 100644
index 000000000000..7476fd52ce2b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STREAM_PRIV_H_
+#define _DC_STREAM_PRIV_H_
+
+#include "dc_stream.h"
+
+bool dc_stream_construct(struct dc_stream_state *stream,
+ struct dc_sink *dc_sink_data);
+void dc_stream_destruct(struct dc_stream_state *stream);
+
+void dc_stream_assign_stream_id(struct dc_stream_state *stream);
+
+#endif // _DC_STREAM_PRIV_H_
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 7313cfe69498..b08ccb8c68bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1140,25 +1140,34 @@ struct dc_panel_config {
} ilr;
};
+#define MAX_SINKS_PER_LINK 4
+
/*
* USB4 DPIA BW ALLOCATION STRUCTS
*/
struct dc_dpia_bw_alloc {
- int sink_verified_bw; // The Verified BW that sink can allocated and use that has been verified already
- int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
- int sink_max_bw; // The Max BW that sink can require/support
+ int remote_sink_req_bw[MAX_SINKS_PER_LINK]; // BW requested by remote sinks
+ int link_verified_bw; // Already-verified BW that the link can allocate and use
+ int link_max_bw; // Max BW that the link can request/support
+ int allocated_bw; // The Actual Allocated BW for this DPIA
int estimated_bw; // The estimated available BW for this DPIA
int bw_granularity; // BW Granularity
+ int dp_overhead; // DP overhead in dp tunneling
bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
bool response_ready; // Response ready from the CM side
+ uint8_t nrd_max_lane_count; // Non-reduced max lane count
+ uint8_t nrd_max_link_rate; // Non-reduced max link rate
};
-#define MAX_SINKS_PER_LINK 4
-
enum dc_hpd_enable_select {
HPD_EN_FOR_ALL_EDP = 0,
HPD_EN_FOR_PRIMARY_EDP_ONLY,
HPD_EN_FOR_SECONDARY_EDP_ONLY,
};
+enum mall_stream_type {
+ SUBVP_NONE, // subvp not in use
+ SUBVP_MAIN, // subvp in use, this stream is main stream
+ SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream
+};
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 874b132fe1d7..a6006776333d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -135,7 +135,7 @@ static void dmcu_set_backlight_level(
0, 1, 80000);
}
-static void dce_abm_init(struct abm *abm, uint32_t backlight)
+static void dce_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
@@ -162,7 +162,7 @@ static void dce_abm_init(struct abm *abm, uint32_t backlight)
BL1_PWM_TARGET_ABM_LEVEL, backlight);
REG_UPDATE(BL1_PWM_USER_LEVEL,
- BL1_PWM_USER_LEVEL, backlight);
+ BL1_PWM_USER_LEVEL, user_level);
REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 140598f18bbd..f0458b8f00af 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -782,7 +782,7 @@ static void get_azalia_clock_info_dp(
/* audio_dto_module = audioDtoSourceClockInKhz * 10,000;
 * [kHz] -> [100 Hz]
 */
azalia_clock_info->audio_dto_module =
- pll_info->dp_dto_source_clock_in_khz * 10;
+ pll_info->audio_dto_source_clock_in_khz * 10;
}
void dce_aud_wall_dto_setup(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 5d3f6fa1011e..970644b695cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -975,6 +975,9 @@ static bool dcn31_program_pix_clk(
look_up_in_video_optimized_rate_tlb(pix_clk_params->requested_pix_clk_100hz / 10);
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
+
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+ dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
// For these signal types Driver to program DP_DTO without calling VBIOS Command table
if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
if (e) {
@@ -1088,6 +1091,10 @@ static bool get_pixel_clk_frequency_100hz(
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
unsigned int clock_hz = 0;
unsigned int modulo_hz = 0;
+ unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
+
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+ dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
clock_hz = REG_READ(PHASE[inst]);
@@ -1100,7 +1107,7 @@ static bool get_pixel_clk_frequency_100hz(
modulo_hz = REG_READ(MODULO[inst]);
if (modulo_hz)
*pixel_clk_khz = div_u64((uint64_t)clock_hz*
- clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
+ dp_dto_ref_khz*10,
modulo_hz);
else
*pixel_clk_khz = 0;
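Worked numbers for the scaling above, with assumed register values: PHASE = 27,000,000, MODULO = 100,000,000 and dp_dto_ref_khz = 1,000,000 (a 1 GHz DTO source) give

	pixel_clk = 27,000,000 * 1,000,000 * 10 / 100,000,000
	          = 2,700,000   /* units of 100 Hz, i.e. 270 MHz */

so the dp_dto_source_clock_in_khz override only swaps the reference term; the 10x scaling into 100 Hz units is unchanged.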
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index d3e6544022b7..ccc154b0281c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -57,18 +57,22 @@ static unsigned int abm_feature_support(struct abm *abm, unsigned int panel_inst
return ret;
}
-static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight)
+static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight, uint32_t user_level)
{
- dmub_abm_init(abm, backlight);
+ dmub_abm_init(abm, backlight, user_level);
}
static unsigned int dmub_abm_get_current_backlight_ex(struct abm *abm)
{
+ dc_allow_idle_optimizations(abm->ctx->dc, false);
+
return dmub_abm_get_current_backlight(abm);
}
static unsigned int dmub_abm_get_target_backlight_ex(struct abm *abm)
{
+ dc_allow_idle_optimizations(abm->ctx->dc, false);
+
return dmub_abm_get_target_backlight(abm);
}
@@ -145,7 +149,11 @@ static bool dmub_abm_save_restore_ex(
return ret;
}
-static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
+static bool dmub_abm_set_pipe_ex(struct abm *abm,
+ uint32_t otg_inst,
+ uint32_t option,
+ uint32_t panel_inst,
+ uint32_t pwrseq_inst)
{
bool ret = false;
unsigned int feature_support;
@@ -153,7 +161,7 @@ static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t op
feature_support = abm_feature_support(abm, panel_inst);
if (feature_support == ABM_LCD_SUPPORT)
- ret = dmub_abm_set_pipe(abm, otg_inst, option, panel_inst);
+ ret = dmub_abm_set_pipe(abm, otg_inst, option, panel_inst, pwrseq_inst);
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index 592a8f7a1c6d..f9d6a181164a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -76,10 +76,10 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask;
cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data);
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
-void dmub_abm_init(struct abm *abm, uint32_t backlight)
+void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level)
{
struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
@@ -106,7 +106,7 @@ void dmub_abm_init(struct abm *abm, uint32_t backlight)
BL1_PWM_TARGET_ABM_LEVEL, backlight);
REG_UPDATE(BL1_PWM_USER_LEVEL,
- BL1_PWM_USER_LEVEL, backlight);
+ BL1_PWM_USER_LEVEL, user_level);
REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
@@ -155,7 +155,7 @@ bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask)
cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask;
cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data);
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -186,7 +186,7 @@ void dmub_abm_init_config(struct abm *abm,
cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data);
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -203,7 +203,7 @@ bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, un
cmd.abm_pause.abm_pause_data.panel_mask = panel_mask;
cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data);
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -246,7 +246,7 @@ bool dmub_abm_save_restore(
cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
// Copy iramtable data into local structure
memcpy((void *)pData, dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);
@@ -254,7 +254,11 @@ bool dmub_abm_save_restore(
return true;
}
-bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
+bool dmub_abm_set_pipe(struct abm *abm,
+ uint32_t otg_inst,
+ uint32_t option,
+ uint32_t panel_inst,
+ uint32_t pwrseq_inst)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = abm->ctx;
@@ -264,12 +268,13 @@ bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint
cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.pwrseq_inst = pwrseq_inst;
cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option;
cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
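A hedged call-site sketch for the widened signature; the option and pwrseq_inst values below are placeholders that a hwseq caller would derive from the panel's power-sequencer mapping:

	uint32_t option = 0;       /* placeholder pipe option */
	uint32_t pwrseq_inst = 0;  /* placeholder power sequencer instance */

	dmub_abm_set_pipe(abm, otg_inst, option, panel_inst, pwrseq_inst);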
@@ -291,7 +296,7 @@ bool dmub_abm_set_backlight_level(struct abm *abm,
cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst);
cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
index 853564d7f471..761685e5b8c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
@@ -30,7 +30,7 @@
struct abm_save_restore;
-void dmub_abm_init(struct abm *abm, uint32_t backlight);
+void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level);
bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask);
unsigned int dmub_abm_get_current_backlight(struct abm *abm);
unsigned int dmub_abm_get_target_backlight(struct abm *abm);
@@ -44,7 +44,7 @@ bool dmub_abm_save_restore(
struct dc_context *dc,
unsigned int panel_inst,
struct abm_save_restore *pData);
-bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst);
+bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst);
bool dmub_abm_set_backlight_level(struct abm *abm,
unsigned int backlight_pwm_u16_16,
unsigned int frame_ramp,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index 2aa0e01a6891..ba1fec3016d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -47,7 +47,7 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
if (!lock)
cmd.lock_hw.lock_hw_data.should_release = 1;
- dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
index d8009b2dc56a..98a778996e1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
@@ -48,5 +48,5 @@ void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv)
sizeof(cmd.outbox1_enable.header);
cmd.outbox1_enable.enable = true;
- dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 9d4170a356a2..3e243e407bb8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -105,23 +105,18 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
*/
static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state, uint8_t panel_inst)
{
- struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
uint32_t raw_state = 0;
uint32_t retry_count = 0;
- enum dmub_status status;
do {
// Send gpint command and wait for ack
- status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, panel_inst, 30);
-
- if (status == DMUB_STATUS_OK) {
- // GPINT was executed, get response
- dmub_srv_get_gpint_response(srv, &raw_state);
+ if (dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__GET_PSR_STATE, panel_inst, &raw_state,
+ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
*state = convert_psr_state(raw_state);
- } else
+ } else {
// Return invalid state when GPINT times out
*state = PSR_STATE_INVALID;
-
+ }
} while (++retry_count <= 1000 && *state == PSR_STATE_INVALID);
// Assert if max retry hit
@@ -171,7 +166,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst;
cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -199,7 +194,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8
cmd.psr_enable.header.payload_bytes = 0; // Send header only
- dm_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
/* Below loops 1000 x 500us = 500 ms.
* Exit PSR may need to wait 1-2 frames to power up. Timeout after at
@@ -248,7 +243,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst;
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
@@ -267,7 +262,7 @@ static void dmub_psr_set_sink_vtotal_in_psr_active(struct dmub_psr *dmub,
cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_idle = psr_vtotal_idle;
cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_su = psr_vtotal_su;
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
@@ -286,7 +281,7 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt
cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt;
cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst;
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
@@ -423,7 +418,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->relock_delay_frame_cnt = 2;
copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height;
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -444,7 +439,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst)
cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC;
cmd.psr_enable.header.payload_bytes = 0;
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
@@ -452,13 +447,11 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst)
*/
static void dmub_psr_get_residency(struct dmub_psr *dmub, uint32_t *residency, uint8_t panel_inst)
{
- struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
uint16_t param = (uint16_t)(panel_inst << 8);
/* Send gpint command and wait for ack */
- dmub_srv_send_gpint_command(srv, DMUB_GPINT__PSR_RESIDENCY, param, 30);
-
- dmub_srv_get_gpint_response(srv, residency);
+ dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__PSR_RESIDENCY, param, residency,
+ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
}
static const struct dmub_psr_funcs psr_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index 28149e53c2a6..38e4797e9476 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -258,13 +258,97 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
*residency = 0;
}
+/**
+ * dmub_replay_set_power_opt_and_coasting_vtotal() - Set REPLAY power
+ * optimization flags and coasting vtotal.
+ */
+static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub,
+ unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.replay_set_power_opt_and_coasting_vtotal.header.type = DMUB_CMD__REPLAY;
+ cmd.replay_set_power_opt_and_coasting_vtotal.header.sub_type =
+ DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL;
+ cmd.replay_set_power_opt_and_coasting_vtotal.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal);
+ cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.power_opt = power_opt;
+ cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.panel_inst = panel_inst;
+ cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal;
+
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+/**
+ * dmub_replay_send_cmd() - Send a general Replay command to DMUB.
+ */
+static void dmub_replay_send_cmd(struct dmub_replay *dmub,
+ enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *ctx = NULL;
+
+ if (dmub == NULL || cmd_element == NULL)
+ return;
+
+ ctx = dmub->ctx;
+ if (ctx == NULL || msg == Replay_Msg_Not_Support)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+ //Header
+ cmd.replay_set_timing_sync.header.type = DMUB_CMD__REPLAY;
+
+ switch (msg) {
+ case Replay_Set_Timing_Sync_Supported:
+ //Header
+ cmd.replay_set_timing_sync.header.sub_type =
+ DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED;
+ cmd.replay_set_timing_sync.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_timing_sync);
+ //Cmd Body
+ cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst =
+ cmd_element->sync_data.panel_inst;
+ cmd.replay_set_timing_sync.replay_set_timing_sync_data.timing_sync_supported =
+ cmd_element->sync_data.timing_sync_supported;
+ break;
+ case Replay_Set_Residency_Frameupdate_Timer:
+ //Header
+ cmd.replay_set_frameupdate_timer.header.sub_type =
+ DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER;
+ cmd.replay_set_frameupdate_timer.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer);
+ //Cmd Body
+ cmd.replay_set_frameupdate_timer.data.panel_inst =
+ cmd_element->panel_inst;
+ cmd.replay_set_frameupdate_timer.data.enable =
+ cmd_element->timer_data.enable;
+ cmd.replay_set_frameupdate_timer.data.frameupdate_count =
+ cmd_element->timer_data.frameupdate_count;
+ break;
+ case Replay_Msg_Not_Support:
+ default:
+ return;
+ }
+
+ dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
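Callers fill only the union members the chosen message consumes. A hedged example for the frame-update timer (the count value is illustrative, not a recommended setting):

    union dmub_replay_cmd_set element;

    memset(&element, 0, sizeof(element));
    element.panel_inst = panel_inst;
    element.timer_data.enable = true;
    element.timer_data.frameupdate_count = 5; /* illustrative */

    replay->funcs->replay_send_cmd(replay,
                    Replay_Set_Residency_Frameupdate_Timer, &element);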
+
static const struct dmub_replay_funcs replay_funcs = {
- .replay_copy_settings = dmub_replay_copy_settings,
- .replay_enable = dmub_replay_enable,
- .replay_get_state = dmub_replay_get_state,
- .replay_set_power_opt = dmub_replay_set_power_opt,
- .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal,
- .replay_residency = dmub_replay_residency,
+ .replay_copy_settings = dmub_replay_copy_settings,
+ .replay_enable = dmub_replay_enable,
+ .replay_get_state = dmub_replay_get_state,
+ .replay_set_power_opt = dmub_replay_set_power_opt,
+ .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal,
+ .replay_residency = dmub_replay_residency,
+ .replay_set_power_opt_and_coasting_vtotal = dmub_replay_set_power_opt_and_coasting_vtotal,
+ .replay_send_cmd = dmub_replay_send_cmd,
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
index b3ee90a0b8b3..3613aff994d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -51,6 +51,8 @@ struct dmub_replay_funcs {
uint8_t panel_inst);
void (*replay_residency)(struct dmub_replay *dmub,
uint8_t panel_inst, uint32_t *residency, const bool is_start, const bool is_alpm);
+ void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub,
+ unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal);
};
struct dmub_replay *dmub_replay_create(struct dc_context *ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 0dd62934a18c..ae6a131be71b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -22,7 +22,7 @@
#
# Makefile for DCN.
-DCN10 = dcn10_init.o dcn10_ipp.o \
+DCN10 = dcn10_ipp.o \
dcn10_hw_sequencer_debug.o \
dcn10_dpp.o dcn10_opp.o \
dcn10_hubp.o dcn10_mpc.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index bd760442ff89..3dae3943b056 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for DCN.
-DCN20 = dcn20_init.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
+DCN20 = dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \
dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index 76da59d8caaf..ef5c22f41563 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -296,6 +296,38 @@
type DTBCLK_P1_GATE_DISABLE;\
type DTBCLK_P2_GATE_DISABLE;\
type DTBCLK_P3_GATE_DISABLE;\
+ type DSCCLK0_ROOT_GATE_DISABLE;\
+ type DSCCLK1_ROOT_GATE_DISABLE;\
+ type DSCCLK2_ROOT_GATE_DISABLE;\
+ type DSCCLK3_ROOT_GATE_DISABLE;\
+ type SYMCLKA_FE_ROOT_GATE_DISABLE;\
+ type SYMCLKB_FE_ROOT_GATE_DISABLE;\
+ type SYMCLKC_FE_ROOT_GATE_DISABLE;\
+ type SYMCLKD_FE_ROOT_GATE_DISABLE;\
+ type SYMCLKE_FE_ROOT_GATE_DISABLE;\
+ type DPPCLK0_ROOT_GATE_DISABLE;\
+ type DPPCLK1_ROOT_GATE_DISABLE;\
+ type DPPCLK2_ROOT_GATE_DISABLE;\
+ type DPPCLK3_ROOT_GATE_DISABLE;\
+ type HDMISTREAMCLK0_ROOT_GATE_DISABLE;\
+ type SYMCLKA_ROOT_GATE_DISABLE;\
+ type SYMCLKB_ROOT_GATE_DISABLE;\
+ type SYMCLKC_ROOT_GATE_DISABLE;\
+ type SYMCLKD_ROOT_GATE_DISABLE;\
+ type SYMCLKE_ROOT_GATE_DISABLE;\
+ type PHYA_REFCLK_ROOT_GATE_DISABLE;\
+ type PHYB_REFCLK_ROOT_GATE_DISABLE;\
+ type PHYC_REFCLK_ROOT_GATE_DISABLE;\
+ type PHYD_REFCLK_ROOT_GATE_DISABLE;\
+ type PHYE_REFCLK_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK0_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK1_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK2_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK3_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK0_GATE_DISABLE;\
+ type DPSTREAMCLK1_GATE_DISABLE;\
+ type DPSTREAMCLK2_GATE_DISABLE;\
+ type DPSTREAMCLK3_GATE_DISABLE;\
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 139cf31d2e45..89c3bf0fe0c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1077,8 +1077,16 @@ void hubp2_cursor_set_position(
if (src_y_offset < 0)
src_y_offset = 0;
/* Save necessary cursor info x, y position. w, h is saved in attribute func. */
- hubp->cur_rect.x = src_x_offset + param->viewport.x;
- hubp->cur_rect.y = src_y_offset + param->viewport.y;
+ if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ param->rotation != ROTATION_ANGLE_0) {
+ hubp->cur_rect.x = 0;
+ hubp->cur_rect.y = 0;
+ hubp->cur_rect.w = param->stream->timing.h_addressable;
+ hubp->cur_rect.h = param->stream->timing.v_addressable;
+ } else {
+ hubp->cur_rect.x = src_x_offset + param->viewport.x;
+ hubp->cur_rect.y = src_y_offset + param->viewport.y;
+ }
}
void hubp2_clk_cntl(struct hubp *hubp, bool enable)
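The new branch above pessimizes the cursor dirty rect to the full frame whenever a PSR-SU panel runs a rotated surface, so the selective-update engine cannot clip a mis-transformed cursor region. A worked example under assumed values:

    /* Illustrative: 1920x1080 addressable timing, ROTATION_ANGLE_90,
     * psr_version >= DC_PSR_VERSION_SU_1 -- the rect covers the frame:
     *   cur_rect = { .x = 0, .y = 0, .w = 1920, .h = 1080 };
     */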
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
index a101e6511555..2b0b4f32e13b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: MIT
#
# Makefile for DCN.
-DCN201 = dcn201_init.o \
- dcn201_hubbub.o\
+DCN201 = dcn201_hubbub.o\
dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_dpp.o \
dcn201_dccg.o dcn201_link_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index dd1eea7212f4..ca92f5c8e7fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for DCN21.
-DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o \
+DCN21 = dcn21_hubp.o dcn21_hubbub.o \
dcn21_link_encoder.o dcn21_dccg.o
AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index 68cad55c72ab..e13d69a22c1c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -691,7 +691,7 @@ static void dmcub_PLAT_54186_wa(struct hubp *hubp,
cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
PERF_TRACE(); // TODO: remove after performance is stable.
- dm_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
PERF_TRACE(); // TODO: remove after performance is stable.
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index cd95f322235e..b5b2aa3b3783 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -23,9 +23,7 @@
#
#
-DCN30 := \
- dcn30_init.o \
- dcn30_hubbub.o \
+DCN30 := dcn30_hubbub.o \
dcn30_hubp.o \
dcn30_dpp.o \
dcn30_dccg.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
index 0d98918bf0fc..1b9d9495f76d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
@@ -130,6 +130,28 @@ bool dwb3_disable(struct dwbc *dwbc)
return true;
}
+void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+ unsigned int pre_locked;
+
+ REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked);
+
+ /* Lock DWB registers */
+ if (pre_locked == 0)
+ REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1);
+
+ /* Program frame capture enable/disable */
+ REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, enable);
+
+ /* Unlock DWB registers */
+ if (pre_locked == 0)
+ REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0);
+
+ DC_LOG_DWB("%s dwb3_set_fc_enable: %d at inst = %d", __func__, enable, dwbc->inst);
+}
+
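A hypothetical caller disabling capture before reprogramming the writeback pipe (the enumerator name is assumed from dwb.h):

    if (dwbc->funcs->set_fc_enable)
            dwbc->funcs->set_fc_enable(dwbc, DWB_FRAME_CAPTURE_DISABLE);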
bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
@@ -226,6 +248,7 @@ static const struct dwbc_funcs dcn30_dwbc_funcs = {
.disable = dwb3_disable,
.update = dwb3_update,
.is_enabled = dwb3_is_enabled,
+ .set_fc_enable = dwb3_set_fc_enable,
.set_stereo = dwb3_set_stereo,
.set_new_content = dwb3_set_new_content,
.dwb_program_output_csc = NULL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
index a5d1b81e768d..332634b76aac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
@@ -877,6 +877,8 @@ bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params);
bool dwb3_is_enabled(struct dwbc *dwbc);
+void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable);
+
void dwb3_set_stereo(struct dwbc *dwbc,
struct dwb_stereo_params *stereo_params);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
index 701c7d8bc038..03a50c32fcfe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
@@ -243,6 +243,9 @@ static bool dwb3_program_ogam_lut(
return false;
}
+ if (params->hw_points_num == 0)
+ return false;
+
REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
current_mode = dwb3_get_ogam_current(dwbc30);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 090011300dcd..d241f665e40a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -10,7 +10,7 @@
#
# Makefile for dcn30.
-DCN301 = dcn301_init.o dcn301_dccg.o \
+DCN301 = dcn301_dccg.o \
dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o
AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
deleted file mode 100644
index 0fcd03569d74..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved
-#
-# Authors: AMD
-#
-# Makefile for dcn302.
-
-DCN3_02 = dcn302_init.o
-
-AMD_DAL_DCN3_02 = $(addprefix $(AMDDALPATH)/dc/dcn302/,$(DCN3_02))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_02)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
index 11a2662e58ef..5d93ac16c03a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
@@ -10,7 +10,7 @@
#
# Makefile for dcn31.
-DCN31 = dcn31_hubbub.o dcn31_init.o dcn31_hubp.o \
+DCN31 = dcn31_hubbub.o dcn31_hubp.o \
dcn31_dccg.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \
dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \
dcn31_afmt.o dcn31_vpg.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index 4596f3bac1b4..26be5fee7411 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -125,7 +125,7 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc,
cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
- if (!dm_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ if (!dc_wake_and_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
return false;
return true;
@@ -436,7 +436,7 @@ static bool link_dpia_control(struct dc_context *dc_ctx,
cmd.dig1_dpia_control.dpia_control = *dpia_control;
- dm_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 217acd4e292a..03248422d6ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -50,9 +50,9 @@ static bool dcn31_query_backlight_info(struct panel_cntl *panel_cntl, union dmub
cmd->panel_cntl.header.type = DMUB_CMD__PANEL_CNTL;
cmd->panel_cntl.header.sub_type = DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO;
cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data);
- cmd->panel_cntl.data.inst = dcn31_panel_cntl->base.inst;
+ cmd->panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst;
- return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+ return dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
}
static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
@@ -78,14 +78,14 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
cmd.panel_cntl.header.type = DMUB_CMD__PANEL_CNTL;
cmd.panel_cntl.header.sub_type = DMUB_CMD__PANEL_CNTL_HW_INIT;
cmd.panel_cntl.header.payload_bytes = sizeof(cmd.panel_cntl.data);
- cmd.panel_cntl.data.inst = dcn31_panel_cntl->base.inst;
+ cmd.panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst;
cmd.panel_cntl.data.bl_pwm_cntl = panel_cntl->stored_backlight_registers.BL_PWM_CNTL;
cmd.panel_cntl.data.bl_pwm_period_cntl = panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL;
cmd.panel_cntl.data.bl_pwm_ref_div1 =
panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
cmd.panel_cntl.data.bl_pwm_ref_div2 =
panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2;
- if (!dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ if (!dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
return 0;
panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl;
@@ -157,4 +157,5 @@ void dcn31_panel_cntl_construct(
dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs;
dcn31_panel_cntl->base.ctx = init_data->ctx;
dcn31_panel_cntl->base.inst = init_data->inst;
+ dcn31_panel_cntl->base.pwrseq_inst = init_data->pwrseq_inst;
}
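Splitting inst from pwrseq_inst lets a panel_cntl instance borrow another power sequencer. A hypothetical init block (field values are illustrative):

    struct panel_cntl_init_data init_data = {
            .ctx = ctx,
            .inst = 1,          /* second panel_cntl instance... */
            .pwrseq_inst = 0,   /* ...driven by power sequencer 0 */
    };

    dcn31_panel_cntl_construct(&dcn31_panel_cntl, &init_data);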
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
index d5c177346a3b..b134ab05aa71 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
@@ -10,8 +10,7 @@
#
# Makefile for dcn314.
-DCN314 = dcn314_init.o \
- dcn314_dio_stream_encoder.o dcn314_dccg.o
+DCN314 = dcn314_dio_stream_encoder.o dcn314_dccg.o
AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
index 905b74b53092..5314770fff1c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
@@ -10,7 +10,7 @@
#
# Makefile for dcn32.
-DCN32 = dcn32_hubbub.o dcn32_init.o dcn32_dccg.o \
+DCN32 = dcn32_hubbub.o dcn32_dccg.o \
dcn32_mmhubbub.o dcn32_dpp.o dcn32_hubp.o dcn32_mpc.o \
dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \
dcn32_hpo_dp_link_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index 994b21ed272f..e789e654c387 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -71,12 +71,13 @@ void mpc32_power_on_blnd_lut(
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ REG_SET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], 0, MPCC_MCM_1DLUT_MEM_PWR_DIS, power_on);
+
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.cm) {
if (power_on) {
REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_FORCE, 0);
REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_STATE, 0, 1, 5);
} else if (!mpc->ctx->dc->debug.disable_mem_low_power) {
- ASSERT(false);
/* TODO: change to mpc
* dpp_base->ctx->dc->optimized_required = true;
* dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 389ac7ae1154..87760600e154 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -28,6 +28,7 @@
#include "dcn20/dcn20_resource.h"
#include "dml/dcn32/display_mode_vba_util_32.h"
#include "dml/dcn32/dcn32_fpu.h"
+#include "dc_state_priv.h"
static bool is_dual_plane(enum surface_pixel_format format)
{
@@ -190,7 +191,7 @@ bool dcn32_subvp_in_use(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE)
+ if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
return true;
}
return false;
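Note the dropped pipe->stream guard: the accessor has to be NULL-safe itself. A sketch of the shape that implies (the real implementation lives behind dc_state_priv.h; this is an assumption):

    static inline enum mall_stream_type
    sketch_get_pipe_subvp_type(const struct dc_state *state,
                               const struct pipe_ctx *pipe)
    {
            if (!pipe->stream)
                    return SUBVP_NONE; /* keeps callers NULL-safe */

            return dc_state_get_stream_subvp_type(state, pipe->stream);
    }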
@@ -264,18 +265,17 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint
// Do not override if a stream has multiple planes
for (i = 0; i < context->stream_count; i++) {
- if (context->stream_status[i].plane_count > 1) {
+ if (context->stream_status[i].plane_count > 1)
return;
- }
- if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) {
+
+ if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)
stream_count++;
- }
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
if (dcn32_allow_subvp_high_refresh_rate(dc, context, pipe_ctx)) {
if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) {
@@ -290,7 +290,7 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) {
if (pipe_segments[i] > 4)
pipe_segments[i] = 4;
@@ -337,14 +337,14 @@ void dcn32_determine_det_override(struct dc *dc,
for (i = 0; i < context->stream_count; i++) {
/* Don't count SubVP streams for DET allocation */
- if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM)
+ if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)
stream_count++;
}
if (stream_count > 0) {
stream_segments = 18 / stream_count;
for (i = 0; i < context->stream_count; i++) {
- if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
+ if (dc_state_get_stream_subvp_type(context, context->streams[i]) == SUBVP_PHANTOM)
continue;
if (context->stream_status[i].plane_count > 0)
@@ -430,71 +430,6 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
dcn32_determine_det_override(dc, context, pipes);
}
-/**
- * dcn32_save_mall_state(): Save MALL (SubVP) state for fast validation cases
- *
- * This function saves the MALL (SubVP) case for fast validation cases. For fast validation,
- * there are situations where a shallow copy of the dc->current_state is created for the
- * validation. In this case we want to save and restore the mall config because we always
- * teardown subvp at the beginning of validation (and don't attempt to add it back if it's
- * fast validation). If we don't restore the subvp config in cases of fast validation +
- * shallow copy of the dc->current_state, the dc->current_state will have a partially
- * removed subvp state when we did not intend to remove it.
- *
- * NOTE: This function ONLY works if the streams are not moved to a different pipe in the
- * validation. We don't expect this to happen in fast_validation=1 cases.
- *
- * @dc: Current DC state
- * @context: New DC state to be programmed
- * @temp_config: struct used to cache the existing MALL state
- *
- * Return: void
- */
-void dcn32_save_mall_state(struct dc *dc,
- struct dc_state *context,
- struct mall_temp_config *temp_config)
-{
- uint32_t i;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->stream)
- temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config;
-
- if (pipe->plane_state)
- temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom;
- }
-}
-
-/**
- * dcn32_restore_mall_state(): Restore MALL (SubVP) state for fast validation cases
- *
- * Restore the MALL state based on the previously saved state from dcn32_save_mall_state
- *
- * @dc: Current DC state
- * @context: New DC state to be programmed, restore MALL state into here
- * @temp_config: struct that has the cached MALL state
- *
- * Return: void
- */
-void dcn32_restore_mall_state(struct dc *dc,
- struct dc_state *context,
- struct mall_temp_config *temp_config)
-{
- uint32_t i;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->stream)
- pipe->stream->mall_stream_config = temp_config->mall_stream_config[i];
-
- if (pipe->plane_state)
- pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
- }
-}
-
#define MAX_STRETCHED_V_BLANK 1000 // in micro-seconds (must ensure to match value in FW)
/*
* Scaling factor for v_blank stretch calculations considering timing in
@@ -589,13 +524,14 @@ static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream)
*
* Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL
*/
-struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context)
+struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context)
{
int refresh_rate = 0;
const int minimum_refreshrate_supported = 120;
struct dc_stream_state *fpo_candidate_stream = NULL;
bool is_fpo_vactive = false;
uint32_t fpo_vactive_margin_us = 0;
+ struct dc_stream_status *fpo_stream_status = NULL;
if (context == NULL)
return NULL;
@@ -618,16 +554,28 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre
DC_FP_START();
dcn32_assign_fpo_vactive_candidate(dc, context, &fpo_candidate_stream);
DC_FP_END();
-
+ if (fpo_candidate_stream)
+ fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
DC_FP_START();
is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, dc->debug.fpo_vactive_min_active_margin_us);
DC_FP_END();
if (!is_fpo_vactive || dc->debug.disable_fpo_vactive)
return NULL;
- } else
+ } else {
fpo_candidate_stream = context->streams[0];
+ if (fpo_candidate_stream)
+ fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
+ }
- if (!fpo_candidate_stream)
+ /* In DCN32/321, FPO uses per-pipe P-State force.
+ * If there are no planes, HUBP is power gated and
+ * therefore programming UCLK_PSTATE_FORCE does
+ * nothing (P-State will always be asserted naturally
+ * on a pipe that has HUBP power gated). Therefore we
+ * only want to enable FPO if the FPO pipe has both
+ * a stream and a plane.
+ */
+ if (!fpo_candidate_stream || !fpo_stream_status || fpo_stream_status->plane_count == 0)
return NULL;
if (fpo_candidate_stream->sink->edid_caps.panel_patch.disable_fams)
@@ -716,10 +664,11 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
if (resource_is_pipe_type(pipe, OPP_HEAD) &&
resource_is_pipe_type(pipe, DPP_PIPE)) {
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (pipe_mall_type == SUBVP_MAIN) {
subvp_count++;
subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
@@ -728,7 +677,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
}
- if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (pipe_mall_type == SUBVP_NONE) {
non_subvp_pipes++;
drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe));
if (pipe->stream->ignore_msa_timing_param &&
@@ -776,10 +725,11 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
if (resource_is_pipe_type(pipe, OPP_HEAD) &&
resource_is_pipe_type(pipe, DPP_PIPE)) {
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (pipe_mall_type == SUBVP_MAIN) {
subvp_count++;
subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
@@ -788,7 +738,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
}
- if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (pipe_mall_type == SUBVP_NONE) {
non_subvp_pipes++;
vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe));
if (pipe->stream->ignore_msa_timing_param &&
@@ -806,3 +756,29 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
return result;
}
+
+void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe = NULL;
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ int odm_slice_count = 0;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ odm_slice_count = resource_get_odm_slice_count(pipe);
+
+ if (odm_slice_count == 1)
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ else if (odm_slice_count == 2)
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ else if (odm_slice_count == 4)
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1;
+
+ pipe_cnt++;
+ }
+}
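The helper maps resource_get_odm_slice_count() onto DML's combine policy; a 3-slice layout has no matching enum and leaves the field untouched. A hypothetical call site during validation, assuming pipes[] was already populated:

    pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
    dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);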
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
index fa7ec82ae5f5..0e317e0c36a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
@@ -10,7 +10,7 @@
#
# Makefile for DCN35.
-DCN35 = dcn35_init.o dcn35_dio_stream_encoder.o \
+DCN35 = dcn35_dio_stream_encoder.o \
dcn35_dio_link_encoder.o dcn35_dccg.o \
dcn35_hubp.o dcn35_hubbub.o \
dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
index 142efd390d86..f1ba7bb792ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
@@ -506,6 +506,64 @@ static void dccg35_dpp_root_clock_control(
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
}
+static void dccg35_disable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* set refclk as the source for symclk32_se */
+ switch (hpo_se_inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE0_SRC_SEL, 0,
+ SYMCLK32_SE0_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE0_GATE_DISABLE, 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+// SYMCLK32_ROOT_SE0_GATE_DISABLE, 0);
+ }
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE1_SRC_SEL, 0,
+ SYMCLK32_SE1_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE1_GATE_DISABLE, 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+// SYMCLK32_ROOT_SE1_GATE_DISABLE, 0);
+ }
+ break;
+ case 2:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE2_SRC_SEL, 0,
+ SYMCLK32_SE2_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE2_GATE_DISABLE, 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+// SYMCLK32_ROOT_SE2_GATE_DISABLE, 0);
+ }
+ break;
+ case 3:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE3_SRC_SEL, 0,
+ SYMCLK32_SE3_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE3_GATE_DISABLE, 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+// SYMCLK32_ROOT_SE3_GATE_DISABLE, 0);
+ }
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
void dccg35_init(struct dccg *dccg)
{
int otg_inst;
@@ -514,7 +572,7 @@ void dccg35_init(struct dccg *dccg)
* will cause DCN to hang.
*/
for (otg_inst = 0; otg_inst < 4; otg_inst++)
- dccg31_disable_symclk32_se(dccg, otg_inst);
+ dccg35_disable_symclk32_se(dccg, otg_inst);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
for (otg_inst = 0; otg_inst < 2; otg_inst++)
@@ -788,7 +846,7 @@ static const struct dccg_funcs dccg35_funcs = {
.dccg_init = dccg35_init,
.set_dpstreamclk = dccg35_set_dpstreamclk,
.enable_symclk32_se = dccg31_enable_symclk32_se,
- .disable_symclk32_se = dccg31_disable_symclk32_se,
+ .disable_symclk32_se = dccg35_disable_symclk32_se,
.enable_symclk32_le = dccg31_enable_symclk32_le,
.disable_symclk32_le = dccg31_disable_symclk32_le,
.set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h
index bde48bee0119..1586a45ca3bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h
@@ -34,6 +34,7 @@
#define DCCG_REG_LIST_DCN35() \
DCCG_REG_LIST_DCN314(),\
SR(DPPCLK_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL4),\
SR(DCCG_GATE_DISABLE_CNTL5),\
SR(DCCG_GATE_DISABLE_CNTL6),\
SR(DCCG_GLOBAL_FGCG_REP_CNTL),\
@@ -180,6 +181,56 @@
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, HDMICHARCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, HDMISTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYA_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYB_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYC_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYD_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYE_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL, DISPCLK_DCCG_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, HDMISTREAMCLK0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
struct dccg *dccg35_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
index f91e08895275..da94e5309fba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
@@ -256,6 +256,10 @@ void dcn35_link_encoder_construct(
enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN;
enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN;
enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN;
+ if (bp_cap_info.DP_IS_USB_C) {
+ /* BIOS has not switched to use CONNECTOR_ID_USBC = 24 yet */
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
+ }
} else {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index 4440d08743aa..bd7ba0a25198 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -247,6 +247,7 @@ struct pp_smu_funcs_nv {
#define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4
#define PP_SMU_NUM_DCLK_DPM_LEVELS 8
#define PP_SMU_NUM_VCLK_DPM_LEVELS 8
+#define PP_SMU_NUM_VPECLK_DPM_LEVELS 8
struct dpm_clock {
uint32_t Freq; // In MHz
@@ -262,6 +263,7 @@ struct dpm_clocks {
struct dpm_clock MemClocks[PP_SMU_NUM_MEMCLK_DPM_LEVELS];
struct dpm_clock VClocks[PP_SMU_NUM_VCLK_DPM_LEVELS];
struct dpm_clock DClocks[PP_SMU_NUM_DCLK_DPM_LEVELS];
+ struct dpm_clock VPEClocks[PP_SMU_NUM_VPECLK_DPM_LEVELS];
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index ea7d60f9a9b4..6042a5a6a44f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -61,8 +61,12 @@ endif
endif
ifneq ($(CONFIG_FRAME_WARN),0)
+ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+frame_warn_flag := -Wframe-larger-than=3072
+else
frame_warn_flag := -Wframe-larger-than=2048
endif
+endif
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index ec77b2b41ba3..38ab9ad60ef8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -33,6 +33,7 @@
#include "link.h"
#include "dcn20_fpu.h"
+#include "dc_state_priv.h"
#define DC_LOGGER \
dc->ctx->logger
@@ -440,7 +441,115 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
.use_urgent_burst_bw = 0
};
-struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
+struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dcfclk_mhz = 560.0,
+ .fabricclk_mhz = 560.0,
+ .dispclk_mhz = 513.0,
+ .dppclk_mhz = 513.0,
+ .phyclk_mhz = 540.0,
+ .socclk_mhz = 560.0,
+ .dscclk_mhz = 171.0,
+ .dram_speed_mts = 1069.0,
+ },
+ {
+ .state = 1,
+ .dcfclk_mhz = 694.0,
+ .fabricclk_mhz = 694.0,
+ .dispclk_mhz = 642.0,
+ .dppclk_mhz = 642.0,
+ .phyclk_mhz = 600.0,
+ .socclk_mhz = 694.0,
+ .dscclk_mhz = 214.0,
+ .dram_speed_mts = 1324.0,
+ },
+ {
+ .state = 2,
+ .dcfclk_mhz = 875.0,
+ .fabricclk_mhz = 875.0,
+ .dispclk_mhz = 734.0,
+ .dppclk_mhz = 734.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 875.0,
+ .dscclk_mhz = 245.0,
+ .dram_speed_mts = 1670.0,
+ },
+ {
+ .state = 3,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 1000.0,
+ .dispclk_mhz = 1100.0,
+ .dppclk_mhz = 1100.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1000.0,
+ .dscclk_mhz = 367.0,
+ .dram_speed_mts = 2000.0,
+ },
+ {
+ .state = 4,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 1284.0,
+ .dppclk_mhz = 1284.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1200.0,
+ .dscclk_mhz = 428.0,
+ .dram_speed_mts = 2000.0,
+ },
+ {
+ .state = 5,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 1284.0,
+ .dppclk_mhz = 1284.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1200.0,
+ .dscclk_mhz = 428.0,
+ .dram_speed_mts = 2000.0,
+ },
+ },
+
+ .num_states = 5,
+ .sr_exit_time_us = 1.9,
+ .sr_enter_plus_exit_time_us = 4.4,
+ .urgent_latency_us = 3.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 40.0,
+ .max_avg_dram_bw_use_normal_percent = 40.0,
+ .writeback_latency_us = 12.0,
+ .ideal_dram_bw_after_urgent_percent = 40.0,
+ .max_request_size_bytes = 256,
+ .dram_channel_width_bytes = 16,
+ .fabric_datapath_to_dcn_data_return_bytes = 64,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.5,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 131,
+ .urgent_out_of_order_return_per_channel_bytes = 4096,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 16,
+ .vmm_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 45.0,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3850,
+ .xfc_bus_transport_time_us = 20,
+ .xfc_xbuf_latency_tolerance_us = 50,
+ .use_urgent_burst_bw = 0,
+};
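num_states is 5 while six clock_limits rows are listed: the state-5 row duplicates state 4, presumably as headroom, and DML only walks indices [0, num_states - 1]. For example:

    /* Sketch: the highest validated state is index num_states - 1 (= 4) */
    struct _vcs_dpi_voltage_scaling_st *max_state =
            &dcn2_0_nv12_soc.clock_limits[dcn2_0_nv12_soc.num_states - 1];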
struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.odm_capable = 1,
@@ -1074,7 +1183,7 @@ void dcn20_calculate_dlg_params(struct dc *dc,
pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
context->res_ctx.pipe_ctx[i].unbounded_req = false;
@@ -1424,7 +1533,7 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc,
*/
if (res_ctx->pipe_ctx[i].plane_state &&
(res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
- res_ctx->pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM))
+ dc_state_get_pipe_subvp_type(context, &res_ctx->pipe_ctx[i]) == SUBVP_PHANTOM))
pipes[pipe_cnt].pipe.src.num_cursors = 0;
else
pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 3686f1e7de3a..63c48c29ba49 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -3542,7 +3542,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
struct vba_vars_st *v = &mode_lib->vba;
int MinPrefetchMode, MaxPrefetchMode;
- int i;
+ int i, start_state;
unsigned int j, k, m;
bool EnoughWritebackUnits = true;
bool WritebackModeSupport = true;
@@ -3553,6 +3553,11 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
+ if (mode_lib->validate_max_state)
+ start_state = v->soc.num_states - 1;
+ else
+ start_state = 0;
+
CalculateMinAndMaxPrefetchMode(
mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank,
&MinPrefetchMode, &MaxPrefetchMode);
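validate_max_state restricts mode-support evaluation to the top voltage state; every per-state loop below picks up start_state as its lower bound. The pattern, in sketch form:

    start_state = mode_lib->validate_max_state ? v->soc.num_states - 1 : 0;

    for (i = start_state; i < v->soc.num_states; i++) {
            /* evaluate support only for states >= start_state */
    }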
@@ -3851,7 +3856,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->SingleDPPViewportSizeSupportPerPlane,
&v->ViewportSizeSupport[0][0]);
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
v->MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDispclk[i], v->DISPCLKDPPCLKVCOSpeed);
v->MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDppclk[i], v->DISPCLKDPPCLKVCOSpeed);
@@ -4007,7 +4012,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*Total Available Pipes Support Check*/
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
if (v->TotalNumberOfActiveDPP[i][j] <= v->MaxNumDPP) {
v->TotalAvailablePipesSupport[i][j] = true;
@@ -4046,7 +4051,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->RequiresDSC[i][k] = false;
v->RequiresFEC[i][k] = false;
@@ -4174,7 +4179,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
}
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
v->DIOSupport[i] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (!v->skip_dio_check[k] && v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)
@@ -4185,7 +4190,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
v->ODMCombine4To1SupportCheckOK[i] = true;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k && v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
@@ -4197,7 +4202,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/* Skip dscclk validation: as long as dispclk is supported, dscclk is also implicitly supported */
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
v->NotEnoughDSCUnits[i] = false;
v->TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@@ -4217,7 +4222,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
/*DSC Delay per state*/
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->OutputBppPerState[i][k] == BPP_INVALID) {
v->BPP = 0.0;
@@ -4333,7 +4338,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->cursor_bw[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0 / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
}
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k];
@@ -5075,7 +5080,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*PTE Buffer Size Check*/
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
v->PTEBufferSizeNotExceeded[i][j] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@@ -5136,7 +5141,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
/*Mode Support, Voltage State and SOC Configuration*/
- for (i = v->soc.num_states - 1; i >= 0; i--) {
+ for (i = v->soc.num_states - 1; i >= start_state; i--) {
for (j = 0; j < 2; j++) {
if (v->ScaleRatioAndTapsSupport == 1 && v->SourceFormatPixelAndScanSupport == 1 && v->ViewportSizeSupport[i][j] == 1
&& v->DIOSupport[i] == 1 && v->ODMCombine4To1SupportCheckOK[i] == 1
@@ -5158,7 +5163,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
{
unsigned int MaximumMPCCombine = 0;
- for (i = v->soc.num_states; i >= 0; i--) {
+ for (i = v->soc.num_states; i >= start_state; i--) {
if (i == v->soc.num_states || v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true) {
v->VoltageLevel = i;
v->ModeIsSupported = v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 26411d4e9730..9f37f717a1f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -32,6 +32,7 @@
#include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
#include "dcn30/dcn30_resource.h"
#include "link.h"
+#include "dc_state_priv.h"
#define DC_LOGGER_INIT(logger)
@@ -341,7 +342,7 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
if (!pipe->stream)
continue;
- if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (pipe->plane_state && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
pipes[pipe_idx].pipe.dest.vstartup_start =
get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_offset =
@@ -624,7 +625,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE &&
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
(refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
!pipe->plane_state->address.tmz_surface &&
(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
@@ -682,7 +683,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
// Find the minimum pipe split count for non SubVP pipes
if (resource_is_pipe_type(pipe, OPP_HEAD) &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE) {
split_cnt = 0;
while (pipe) {
split_cnt++;
@@ -735,8 +736,8 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
* and also to store the two main SubVP pipe pointers in subvp_pipes[2].
*/
if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
- phantom = pipe->stream->mall_stream_config.paired_stream;
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+ phantom = dc_state_get_paired_subvp_stream(context, pipe->stream);
microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
phantom->timing.v_addressable;
@@ -804,6 +805,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
int16_t stretched_drr_us = 0;
int16_t drr_stretched_vblank_us = 0;
int16_t max_vblank_mallregion = 0;
+ struct dc_stream_state *phantom_stream;
+ bool subvp_found = false;
+ bool drr_found = false;
// Find SubVP pipe
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -816,8 +820,10 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
continue;
// Find the SubVP pipe
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+ subvp_found = true;
break;
+ }
}
// Find the DRR pipe
@@ -825,32 +831,37 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
drr_pipe = &context->res_ctx.pipe_ctx[i];
// We check for master pipe only
- if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
- !resource_is_pipe_type(pipe, DPP_PIPE))
+ if (!resource_is_pipe_type(drr_pipe, OTG_MASTER) ||
+ !resource_is_pipe_type(drr_pipe, DPP_PIPE))
continue;
- if (drr_pipe->stream->mall_stream_config.type == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
- (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed))
+ if (dc_state_get_pipe_subvp_type(context, drr_pipe) == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
+ (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed)) {
+ drr_found = true;
break;
+ }
}
- main_timing = &pipe->stream->timing;
- phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing;
- drr_timing = &drr_pipe->stream->timing;
- prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
- (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
- dc->caps.subvp_prefetch_end_to_mall_start_us;
- subvp_active_us = main_timing->v_addressable * main_timing->h_total /
- (double)(main_timing->pix_clk_100hz * 100) * 1000000;
- drr_frame_us = drr_timing->v_total * drr_timing->h_total /
- (double)(drr_timing->pix_clk_100hz * 100) * 1000000;
- // P-State allow width and FW delays already included phantom_timing->v_addressable
- mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
- (double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
- stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
- drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
- (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
- max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
+ if (subvp_found && drr_found) {
+ phantom_stream = dc_state_get_paired_subvp_stream(context, pipe->stream);
+ main_timing = &pipe->stream->timing;
+ phantom_timing = &phantom_stream->timing;
+ drr_timing = &drr_pipe->stream->timing;
+ prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
+ (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
+ dc->caps.subvp_prefetch_end_to_mall_start_us;
+ subvp_active_us = main_timing->v_addressable * main_timing->h_total /
+ (double)(main_timing->pix_clk_100hz * 100) * 1000000;
+ drr_frame_us = drr_timing->v_total * drr_timing->h_total /
+ (double)(drr_timing->pix_clk_100hz * 100) * 1000000;
+ // P-State allow width and FW delays already included phantom_timing->v_addressable
+ mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
+ (double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
+ stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
+ drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
+ (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
+ max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
+ }
/* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the
* highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis
@@ -895,6 +906,8 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
struct dc_crtc_timing *main_timing = NULL;
struct dc_crtc_timing *phantom_timing = NULL;
struct dc_crtc_timing *vblank_timing = NULL;
+ struct dc_stream_state *phantom_stream;
+ enum mall_stream_type pipe_mall_type;
/* For SubVP + VBLANK/DRR cases, we assume there can only be
* a single VBLANK/DRR display. If DML outputs SubVP + VBLANK
@@ -904,6 +917,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
+ pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
// We check for master pipe, but it shouldn't matter since we only need
// the pipe for timing info (stream should be same for any pipe splits)
@@ -911,18 +925,19 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
!resource_is_pipe_type(pipe, DPP_PIPE))
continue;
- if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (!found && pipe_mall_type == SUBVP_NONE) {
// Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
vblank_index = i;
found = true;
}
- if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
subvp_pipe = pipe;
}
if (found) {
+ phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
main_timing = &subvp_pipe->stream->timing;
- phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ phantom_timing = &phantom_stream->timing;
vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
// Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
// Also include the prefetch end to mallstart delay time
@@ -977,7 +992,7 @@ static bool subvp_subvp_admissable(struct dc *dc,
continue;
if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
@@ -1026,23 +1041,23 @@ static bool subvp_validate_static_schedulability(struct dc *dc,
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
if (!pipe->stream)
continue;
if (pipe->plane_state && !pipe->top_pipe) {
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (pipe_mall_type == SUBVP_MAIN)
subvp_count++;
- if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (pipe_mall_type == SUBVP_NONE)
non_subvp_pipes++;
- }
}
// Count how many planes that aren't SubVP/phantom are capable of VACTIVE
// switching (SubVP + VACTIVE unsupported). In situations where we force
// SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ pipe_mall_type == SUBVP_NONE) {
vactive_count++;
}
pipe_idx++;
@@ -1078,7 +1093,7 @@ static void assign_subvp_index(struct dc *dc, struct dc_state *context)
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
- pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+ dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) {
pipe_ctx->subvp_index = index++;
} else {
pipe_ctx->subvp_index = 0;
@@ -1532,7 +1547,8 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
// If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
// remove phantom pipes and repopulate dml pipes
if (!found_supported_config) {
- dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
*pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
@@ -1684,7 +1700,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
pipe_idx);
- if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
context->res_ctx.pipe_ctx[i].unbounded_req = false;
@@ -1716,7 +1732,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) &&
context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
/* SS: all active surfaces stored in MALL */
- if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) != SUBVP_PHANTOM) {
context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) {
@@ -1930,7 +1946,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
return false;
// For each full update, remove all existing phantom pipes first
- dc->res_pool->funcs->remove_phantom_pipes(dc, context, fast_validate);
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
@@ -2192,6 +2209,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int i;
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
/* repopulate_pipes = 1 means the pipes were either split or merged. In this case
* we have to re-calculate the DET allocation and run through DML once more to
@@ -2200,7 +2218,9 @@ bool dcn32_internal_validate_bw(struct dc *dc,
* */
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
+
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+
if (vlevel == context->bw_ctx.dml.soc.num_states) {
/* failed after DET size changes */
goto validate_fail;
@@ -3445,7 +3465,15 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->stream)
+ /* In DCN32/321, FPO uses per-pipe P-State force.
+ * If there are no planes, HUBP is power gated and
+ * therefore programming UCLK_PSTATE_FORCE does
+ * nothing (P-State will always be asserted naturally
+ * on a pipe that has HUBP power gated). Therefore we
+ * only want to enable FPO if the FPO pipe has both
+ * a stream and a plane.
+ */
+ if (!pipe->stream || !pipe->plane_state)
continue;
if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index cbdfb762c10c..6c84b0fa40f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -813,6 +813,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
(v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
+ mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] > 0 || mode_lib->vba.DRAMClockChangeRequirementFinal == false,
+
/* Output */
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
@@ -3317,6 +3319,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->SwathHeightCThisState[k], v->TWait,
(v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
+ mode_lib->vba.PrefetchModePerState[i][j] > 0 || mode_lib->vba.DRAMClockChangeRequirementFinal == false,
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index d940dfa5ae43..80fccd4999a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -3423,6 +3423,7 @@ bool dml32_CalculatePrefetchSchedule(
unsigned int SwathHeightC,
double TWait,
double TPreReq,
+ bool ExtendPrefetchIfPossible,
/* Output */
double *DSTXAfterScaler,
double *DSTYAfterScaler,
@@ -3892,12 +3893,32 @@ bool dml32_CalculatePrefetchSchedule(
/* Clamp to oto for bandwidth calculation */
LinesForPrefetchBandwidth = dst_y_prefetch_oto;
} else {
- *DestinationLinesForPrefetch = dst_y_prefetch_equ;
- TimeForFetchingMetaPTE = Tvm_equ;
- TimeForFetchingRowInVBlank = Tr0_equ;
- *PrefetchBandwidth = prefetch_bw_equ;
- /* Clamp to equ for bandwidth calculation */
- LinesForPrefetchBandwidth = dst_y_prefetch_equ;
+ /* For mode programming we want to extend the prefetch as much as possible
+ * (up to oto, or as long as we can for equ) if we're not already applying
+ * the 60us prefetch requirement. This is to avoid intermittent underflow
+ * issues during prefetch.
+ *
+ * The prefetch extension is applied under the following scenarios:
+ * 1. We're in prefetch mode > 0 (i.e. we don't support MCLK switch in blank)
+ * 2. We're using subvp or drr methods of p-state switch, in which case
+ *    we don't care if prefetch takes up more of the blanking time
+ *
+ * Mode programming typically chooses the smallest prefetch time possible
+ * (i.e. highest bandwidth during prefetch) presumably to create margin between
+ * p-states / c-states that happen in vblank and prefetch. Therefore we only
+ * apply this prefetch extension when p-state in vblank is not required (UCLK
+ * p-states take up the most vblank time).
+ */
+ if (ExtendPrefetchIfPossible && TPreReq == 0 && VStartup < MaxVStartup) {
+ MyError = true;
+ } else {
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ TimeForFetchingMetaPTE = Tvm_equ;
+ TimeForFetchingRowInVBlank = Tr0_equ;
+ *PrefetchBandwidth = prefetch_bw_equ;
+ /* Clamp to equ for bandwidth calculation */
+ LinesForPrefetchBandwidth = dst_y_prefetch_equ;
+ }
}
*DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
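The new ExtendPrefetchIfPossible branch rejects an otherwise valid equ schedule so the caller will retry with more vstartup lines. A standalone sketch of that retry contract, under the assumption that the surrounding mode-programming loop walks VStartup upward (helper names here are hypothetical):

	#include <stdbool.h>
	#include <stdio.h>

	static bool try_prefetch_schedule(int vstartup, int max_vstartup,
					  bool extend_if_possible)
	{
		if (extend_if_possible && vstartup < max_vstartup)
			return false;	/* MyError = true: reject, ask for more lines */
		return true;		/* accept the equ schedule at this VStartup */
	}

	int main(void)
	{
		int max_vstartup = 9;

		for (int vstartup = 1; vstartup <= max_vstartup; vstartup++) {
			if (try_prefetch_schedule(vstartup, max_vstartup, true)) {
				/* extension stretches prefetch to the max: prints 9 */
				printf("accepted at VStartup=%d\n", vstartup);
				break;
			}
		}
		return 0;
	}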
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 592d174df6c6..5d34735df83d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -747,6 +747,7 @@ bool dml32_CalculatePrefetchSchedule(
unsigned int SwathHeightC,
double TWait,
double TPreReq,
+ bool ExtendPrefetchIfPossible,
/* Output */
double *DSTXAfterScaler,
double *DSTYAfterScaler,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 39cf1ae3a3e1..475c4ec43c01 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -124,7 +124,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 600.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 186.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
{
.state = 1,
@@ -133,7 +133,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
{
.state = 2,
@@ -142,7 +142,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
{
.state = 3,
@@ -151,7 +151,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 371.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
{
.state = 4,
@@ -160,15 +160,15 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 417.0,
- .dtbclk_mhz = 625.0,
+ .dtbclk_mhz = 600.0,
},
},
.num_states = 5,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
- .sr_exit_z8_time_us = 525.0,
- .sr_enter_plus_exit_z8_time_us = 715.0,
- .fclk_change_latency_us = 20.0,
+ .sr_exit_z8_time_us = 210.0,
+ .sr_enter_plus_exit_z8_time_us = 320.0,
+ .fclk_change_latency_us = 24.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
@@ -326,6 +326,25 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
dcn3_5_soc.dram_clock_change_latency_us =
dc->debug.dram_clock_change_latency_ns / 1000.0;
}
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns > 0)
+ dcn3_5_soc.dram_clock_change_latency_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_exit_time_ns > 0)
+ dcn3_5_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns > 0)
+ dcn3_5_soc.sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_exit_z8_time_ns > 0)
+ dcn3_5_soc.sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns > 0)
+ dcn3_5_soc.sr_enter_plus_exit_z8_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
+
/*temp till dml2 fully work without dml1*/
dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip,
DML_PROJECT_DCN31);
@@ -348,6 +367,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
clock_limits[i].socclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
+ clock_limits[i].dtbclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
@@ -360,6 +381,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
+ clk_table->num_entries;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 59718ee33e51..9be5ebf3a8c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -5420,7 +5420,7 @@ static void CalculateOutputLink(
*OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (dml_uint_t)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- if (OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
+ if (*OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
*OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
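The one-character change above fixes a classic output-parameter bug: OutBpp is a pointer, so the old OutBpp == 0 compared the address (never NULL here) and the DSC fallback branch was unreachable. A compact standalone illustration of the pitfall:

	#include <stdio.h>

	/* Hypothetical helper, not the DML code: writes 0 when the link
	 * cannot carry the mode without DSC.
	 */
	static void compute_bpp(double *out_bpp)
	{
		*out_bpp = 0.0;
	}

	int main(void)
	{
		double bpp;
		double *out = &bpp;

		compute_bpp(out);

		if (out == 0)		/* the old bug: compares the pointer, never true here */
			puts("never reached");
		if (*out == 0)		/* the fix: compares the computed value */
			puts("retry with DSC enabled");
		return 0;
	}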
@@ -6229,7 +6229,7 @@ static void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *m
CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+ CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
@@ -9446,13 +9446,13 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
CalculateWatermarks_params->CompressedBufferSizeInkByte = locals->CompressedBufferSizeInkByte;
// Output
- CalculateWatermarks_params->Watermark = &s->dummy_watermark; // Watermarks *Watermark
- CalculateWatermarks_params->DRAMClockChangeSupport = &mode_lib->ms.support.DRAMClockChangeSupport[j];
- CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0][0]; // dml_float_t *MaxActiveDRAMClockChangeLatencySupported[]
- CalculateWatermarks_params->SubViewportLinesNeededInMALL = &mode_lib->ms.SubViewportLinesNeededInMALL[j]; // dml_uint_t SubViewportLinesNeededInMALL[]
- CalculateWatermarks_params->FCLKChangeSupport = &mode_lib->ms.support.FCLKChangeSupport[j];
- CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // dml_float_t *MaxActiveFCLKChangeLatencySupported
- CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport[j];
+ CalculateWatermarks_params->Watermark = &locals->Watermark; // Watermarks *Watermark
+ CalculateWatermarks_params->DRAMClockChangeSupport = &locals->DRAMClockChangeSupport;
+ CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = locals->MaxActiveDRAMClockChangeLatencySupported; // dml_float_t *MaxActiveDRAMClockChangeLatencySupported[]
+ CalculateWatermarks_params->SubViewportLinesNeededInMALL = locals->SubViewportLinesNeededInMALL; // dml_uint_t SubViewportLinesNeededInMALL[]
+ CalculateWatermarks_params->FCLKChangeSupport = &locals->FCLKChangeSupport;
+ CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &locals->MaxActiveFCLKChangeLatencySupported; // dml_float_t *MaxActiveFCLKChangeLatencySupported
+ CalculateWatermarks_params->USRRetrainingSupport = &locals->USRRetrainingSupport;
CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
&mode_lib->scratch,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
index e85866db80ff..7ca7f2a743c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
@@ -38,5 +38,6 @@
#include "core_types.h"
#include "dsc.h"
#include "clk_mgr.h"
+#include "dc_state_priv.h"
#endif //__DML2_DC_TYPES_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
index 32f8a43af3d6..282d70e2b18a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
@@ -51,7 +51,7 @@ unsigned int dml2_helper_calculate_num_ways_for_subvp(struct dml2_context *ctx,
// Find the phantom pipes
if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
mblk_width = ctx->config.mall_cfg.mblk_width_pixels;
mblk_height = bytes_per_pixel == 4 ? ctx->config.mall_cfg.mblk_height_4bpe_pixels : ctx->config.mall_cfg.mblk_height_8bpe_pixels;
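To make the MALL sizing arithmetic concrete, a standalone back-of-the-envelope with assumed block geometry (the real numbers come from ctx->config.mall_cfg and the surface format, so everything below is illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned int bytes_per_pixel = 4;	/* format below ARGB16161616 */
		unsigned int mblk_width = 128;		/* assumed mblk_width_pixels */
		unsigned int mblk_height = 128;		/* assumed mblk_height_4bpe_pixels */
		unsigned int mblk_bytes = mblk_width * mblk_height * bytes_per_pixel;

		unsigned int vp_w = 3840, vp_h = 160;	/* phantom viewport */
		/* round the viewport up to whole MALL blocks in each dimension */
		unsigned int blks = ((vp_w + mblk_width - 1) / mblk_width) *
				    ((vp_h + mblk_height - 1) / mblk_height);

		printf("%u mblks, %u bytes in MALL\n", blks, blks * mblk_bytes);
		return 0;
	}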
@@ -253,7 +253,7 @@ static bool assign_subvp_pipe(struct dml2_context *ctx, struct dc_state *context
* to combine this with SubVP can cause issues with the scheduling).
*/
if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 &&
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_NONE && refresh_rate < 120 &&
vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
while (pipe) {
num_pipes++;
@@ -317,7 +317,7 @@ static bool enough_pipes_for_subvp(struct dml2_context *ctx, struct dc_state *st
// Find the minimum pipe split count for non SubVP pipes
if (pipe->stream && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_NONE) {
split_cnt = 0;
while (pipe) {
split_cnt++;
@@ -372,8 +372,8 @@ static bool subvp_subvp_schedulable(struct dml2_context *ctx, struct dc_state *c
* and also to store the two main SubVP pipe pointers in subvp_pipes[2].
*/
if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
- phantom = pipe->stream->mall_stream_config.paired_stream;
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+ phantom = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream);
microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
phantom->timing.v_addressable;
@@ -435,6 +435,7 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context
struct pipe_ctx *pipe = NULL;
struct dc_crtc_timing *main_timing = NULL;
struct dc_crtc_timing *phantom_timing = NULL;
+ struct dc_stream_state *phantom_stream;
int16_t prefetch_us = 0;
int16_t mall_region_us = 0;
int16_t drr_frame_us = 0; // nominal frame time
@@ -453,12 +454,13 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context
continue;
// Find the SubVP pipe
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
break;
}
+ phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream);
main_timing = &pipe->stream->timing;
- phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing;
+ phantom_timing = &phantom_stream->timing;
prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
(double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
ctx->config.svp_pstate.subvp_prefetch_end_to_mall_start_us;
@@ -519,6 +521,8 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
struct dc_crtc_timing *main_timing = NULL;
struct dc_crtc_timing *phantom_timing = NULL;
struct dc_crtc_timing *vblank_timing = NULL;
+ struct dc_stream_state *phantom_stream;
+ enum mall_stream_type pipe_mall_type;
/* For SubVP + VBLANK/DRR cases, we assume there can only be
* a single VBLANK/DRR display. If DML outputs SubVP + VBLANK
@@ -528,19 +532,20 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
*/
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
+ pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe);
// We check for master pipe, but it shouldn't matter since we only need
// the pipe for timing info (stream should be same for any pipe splits)
if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (!found && pipe_mall_type == SUBVP_NONE) {
// Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
vblank_index = i;
found = true;
}
- if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
subvp_pipe = pipe;
}
// Use ignore_msa_timing_param flag to identify as DRR
@@ -548,8 +553,9 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
// SUBVP + DRR case
schedulable = dml2_svp_drr_schedulable(ctx, context, &context->res_ctx.pipe_ctx[vblank_index].stream->timing);
} else if (found) {
+ phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, subvp_pipe->stream);
main_timing = &subvp_pipe->stream->timing;
- phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ phantom_timing = &phantom_stream->timing;
vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
// Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
// Also include the prefetch end to mallstart delay time
@@ -602,19 +608,20 @@ bool dml2_svp_validate_static_schedulability(struct dml2_context *ctx, struct dc
for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ enum mall_stream_type pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe);
if (!pipe->stream)
continue;
if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ pipe_mall_type == SUBVP_MAIN)
subvp_count++;
// Count how many planes that aren't SubVP/phantom are capable of VACTIVE
// switching (SubVP + VACTIVE unsupported). In situations where we force
// SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
if (vba->ActiveDRAMClockChangeLatencyMargin[vba->pipe_plane[pipe_idx]] > 0 &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ pipe_mall_type == SUBVP_NONE) {
vactive_count++;
}
pipe_idx++;
@@ -708,14 +715,10 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, struct dc_state *state, unsigned int dc_pipe_idx, unsigned int svp_height, unsigned int vstartup)
{
struct pipe_ctx *ref_pipe = &state->res_ctx.pipe_ctx[dc_pipe_idx];
- struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_stream_for_sink(ref_pipe->stream->sink);
-
- phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
- phantom_stream->dpms_off = true;
- phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
- phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
- ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
- ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
+ struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_phantom_stream(
+ ctx->config.svp_pstate.callbacks.dc,
+ state,
+ ref_pipe->stream);
/* stream has limited viewport and small timing */
memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
@@ -723,7 +726,10 @@ static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, s
memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst));
set_phantom_stream_timing(ctx, state, ref_pipe, phantom_stream, dc_pipe_idx, svp_height, vstartup);
- ctx->config.svp_pstate.callbacks.add_stream_to_ctx(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream);
+ ctx->config.svp_pstate.callbacks.add_phantom_stream(ctx->config.svp_pstate.callbacks.dc,
+ state,
+ phantom_stream,
+ ref_pipe->stream);
return phantom_stream;
}
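A standalone model of what the consolidated callback buys, with hypothetical types rather than the driver's: creation, virtual-signal setup, and main/phantom pairing now happen in one place, and the pairing is owned by the state instead of the stream:

	#include <stdbool.h>
	#include <stdio.h>

	struct stream { bool is_virtual; bool dpms_off; };
	struct state  { struct stream *subvp_main; struct stream *subvp_phantom; };

	static struct stream *create_phantom_stream(struct state *st,
						    struct stream *main_stream)
	{
		static struct stream phantom;	/* stands in for a refcounted alloc */

		phantom.is_virtual = true;	/* never drives a physical link */
		phantom.dpms_off = true;
		st->subvp_main = main_stream;	/* pairing is state-owned now, */
		st->subvp_phantom = &phantom;	/* not stream->mall_stream_config */
		return &phantom;
	}

	int main(void)
	{
		struct stream main_stream = { 0 };
		struct state st = { 0 };

		create_phantom_stream(&st, &main_stream);
		printf("paired: %d\n", st.subvp_phantom->is_virtual);	/* 1 */
		return 0;
	}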
@@ -740,7 +746,10 @@ static void enable_phantom_plane(struct dml2_context *ctx,
if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) {
phantom_plane = prev_phantom_plane;
} else {
- phantom_plane = ctx->config.svp_pstate.callbacks.create_plane(ctx->config.svp_pstate.callbacks.dc);
+ phantom_plane = ctx->config.svp_pstate.callbacks.create_phantom_plane(
+ ctx->config.svp_pstate.callbacks.dc,
+ state,
+ curr_pipe->plane_state);
}
memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
@@ -763,9 +772,7 @@ static void enable_phantom_plane(struct dml2_context *ctx,
phantom_plane->clip_rect.y = 0;
phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
- phantom_plane->is_phantom = true;
-
- ctx->config.svp_pstate.callbacks.add_plane_to_context(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state);
+ ctx->config.svp_pstate.callbacks.add_phantom_plane(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state);
curr_pipe = curr_pipe->bottom_pipe;
prev_phantom_plane = phantom_plane;
@@ -790,7 +797,7 @@ static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_
// We determine which phantom pipes were added by comparing with
// the phantom stream.
if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) {
pipe->stream->use_dynamic_meta = false;
pipe->plane_state->flip_immediate = false;
if (!ctx->config.svp_pstate.callbacks.build_scaling_params(pipe)) {
@@ -800,7 +807,7 @@ static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_
}
}
-static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context)
+static bool remove_all_phantom_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context)
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
@@ -821,9 +828,11 @@ static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_str
for (i = 0; i < old_plane_count; i++)
del_planes[i] = stream_status->plane_states[i];
- for (i = 0; i < old_plane_count; i++)
- if (!ctx->config.svp_pstate.callbacks.remove_plane_from_context(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context))
+ for (i = 0; i < old_plane_count; i++) {
+ if (!ctx->config.svp_pstate.callbacks.remove_phantom_plane(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context))
return false;
+ ctx->config.svp_pstate.callbacks.release_phantom_plane(ctx->config.svp_pstate.callbacks.dc, context, del_planes[i]);
+ }
return true;
}
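The loop now pairs each remove with an explicit release, matching the refcount rule spelled out in the comment removed below: one reference from allocation, one from membership in the context. A tiny standalone model of that discipline (names hypothetical):

	#include <assert.h>

	struct obj { int refs; };

	static void get(struct obj *o) { o->refs++; }
	static int put(struct obj *o) { return --o->refs == 0; }	/* 1 means freed */

	int main(void)
	{
		struct obj plane = { .refs = 1 };	/* reference from allocation */

		get(&plane);		/* add_phantom_plane: context takes a reference */
		assert(!put(&plane));	/* remove_phantom_plane: drops it, still alive */
		assert(put(&plane));	/* release_phantom_plane: last ref, now freed */
		return 0;
	}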
@@ -832,35 +841,21 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state
{
int i;
bool removed_pipe = false;
- struct dc_plane_state *phantom_plane = NULL;
struct dc_stream_state *phantom_stream = NULL;
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
// build scaling params for phantom pipes
- if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- phantom_plane = pipe->plane_state;
+ if (pipe->plane_state && pipe->stream && ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) {
phantom_stream = pipe->stream;
- remove_all_planes_for_stream(ctx, pipe->stream, state);
- ctx->config.svp_pstate.callbacks.remove_stream_from_ctx(ctx->config.svp_pstate.callbacks.dc, state, pipe->stream);
-
- /* Ref count is incremented on allocation and also when added to the context.
- * Therefore we must call release for the the phantom plane and stream once
- * they are removed from the ctx to finally decrement the refcount to 0 to free.
- */
- ctx->config.svp_pstate.callbacks.plane_state_release(phantom_plane);
- ctx->config.svp_pstate.callbacks.stream_release(phantom_stream);
+ remove_all_phantom_planes_for_stream(ctx, phantom_stream, state);
+ ctx->config.svp_pstate.callbacks.remove_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream);
+ ctx->config.svp_pstate.callbacks.release_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream);
removed_pipe = true;
}
- // Clear all phantom stream info
- if (pipe->stream) {
- pipe->stream->mall_stream_config.type = SUBVP_NONE;
- pipe->stream->mall_stream_config.paired_stream = NULL;
- }
-
if (pipe->plane_state) {
pipe->plane_state->is_phantom = false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index fa8fe5bf7e57..64d01a9cd68c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -341,6 +341,9 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
break;
}
+ if (dml2->config.bbox_overrides.clks_table.num_states)
+ p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
+
/* Override from passed values, if available */
for (i = 0; i < p->in_states->num_states; i++) {
if (dml2->config.bbox_overrides.sr_exit_latency_us) {
@@ -397,7 +400,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
/* Copy clocks tables entries, if available */
if (dml2->config.bbox_overrides.clks_table.num_states) {
- p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) {
p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz;
@@ -423,8 +425,9 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels; i++) {
- p->in_states->state_array[i].dtbclk_mhz =
- dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz;
+ if (dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz > 0)
+ p->in_states->state_array[i].dtbclk_mhz =
+ dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz;
}
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels; i++) {
@@ -623,8 +626,8 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
if (is_dp2p0_output_encoder(pipe))
out->OutputEncoder[location] = dml_dp2p0;
break;
- out->OutputEncoder[location] = dml_edp;
case SIGNAL_TYPE_EDP:
+ out->OutputEncoder[location] = dml_edp;
break;
case SIGNAL_TYPE_HDMI_TYPE_A:
case SIGNAL_TYPE_DVI_SINGLE_LINK:
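The reordering above fixes a subtle switch bug: the dml_edp assignment sat after an unconditional break and before its own case label, so it was unreachable and eDP streams never had their output encoder set. A compact standalone reproduction of the pattern:

	#include <stdio.h>

	enum sig { SIG_DP, SIG_EDP };

	int main(void)
	{
		enum sig s = SIG_EDP;
		const char *enc = "none";

		switch (s) {
		case SIG_DP:
			enc = "dp";
			break;
			enc = "edp-dead";	/* old bug: unreachable, belongs under SIG_EDP */
		case SIG_EDP:
			enc = "edp";		/* fixed: assignment follows its own case label */
			break;
		}
		printf("%s\n", enc);		/* prints "edp" */
		return 0;
	}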
@@ -1046,8 +1049,10 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2,
void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg)
{
- int i = 0, j = 0;
+ int i = 0, j = 0, k = 0;
int disp_cfg_stream_location, disp_cfg_plane_location;
+ enum mall_stream_type stream_mall_type;
+ struct pipe_ctx *current_pipe_context;
for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id_valid[i] = false;
@@ -1067,7 +1072,17 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
dml2_populate_pipe_to_plane_index_mapping(dml2, context);
for (i = 0; i < context->stream_count; i++) {
+ current_pipe_context = NULL;
+ for (k = 0; k < MAX_PIPES; k++) {
+ /* find one pipe allocated to this stream for the purpose of getting
+ * info about the link later */
+ if (context->streams[i] == context->res_ctx.pipe_ctx[k].stream) {
+ current_pipe_context = &context->res_ctx.pipe_ctx[k];
+ break;
+ }
+ }
disp_cfg_stream_location = map_stream_to_dml_display_cfg(dml2, context->streams[i], dml_dispcfg);
+ stream_mall_type = dc_state_get_stream_subvp_type(context, context->streams[i]);
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_timings++;
@@ -1075,7 +1090,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
- populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], &context->res_ctx.pipe_ctx[i]);
+ populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context);
switch (context->streams[i]->debug.force_odm_combine_segments) {
case 2:
dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_stream_location] = dml_odm_use_policy_combine_2to1;
@@ -1112,10 +1127,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]);
populate_dml_plane_cfg_from_plane_state(&dml_dispcfg->plane, disp_cfg_plane_location, context->stream_status[i].plane_states[j], context);
- if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN) {
+ if (stream_mall_type == SUBVP_MAIN) {
dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport;
dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_optimize;
- } else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) {
+ } else if (stream_mall_type == SUBVP_PHANTOM) {
dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe;
dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_disable;
dml2->v20.dml_core_ctx.policy.ImmediateFlipRequirement[disp_cfg_plane_location] = dml_immediate_flip_not_required;
@@ -1132,7 +1147,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (j >= 1) {
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_plane_location, context->streams[i]);
- populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], &context->res_ctx.pipe_ctx[i]);
+ populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context);
switch (context->streams[i]->debug.force_odm_combine_segments) {
case 2:
dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_plane_location] = dml_odm_use_policy_combine_2to1;
@@ -1144,9 +1159,9 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
break;
}
- if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN)
+ if (stream_mall_type == SUBVP_MAIN)
dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport;
- else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
+ else if (stream_mall_type == SUBVP_PHANTOM)
dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe;
dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[disp_cfg_plane_location] = context->streams[i]->stream_id;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
index 33eab80e89a8..1068b962d1c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
@@ -155,8 +155,20 @@ unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, e
bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx)
{
+ if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
+ return false;
+
/* If this assert is hit then we have a link encoder dynamic management issue */
ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
+
+ /* Count MST hubs once by treating only 1st remote sink in topology as an encoder */
+ if (pipe_ctx->stream->link && pipe_ctx->stream->link->remote_sinks[0]) {
+ return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
+ pipe_ctx->link_res.hpo_dp_link_enc &&
+ dc_is_dp_signal(pipe_ctx->stream->signal) &&
+ (pipe_ctx->stream->link->remote_sinks[0]->sink_id == pipe_ctx->stream->sink->sink_id));
+ }
+
return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
pipe_ctx->link_res.hpo_dp_link_enc &&
dc_is_dp_signal(pipe_ctx->stream->signal));
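A standalone model of the MST dedup rule above, with hypothetical types: when a hub is present, only the stream whose sink matches the hub's first remote sink counts as the DP2.0 encoder, so shared encoder resources are counted once per hub rather than once per downstream display:

	#include <stdbool.h>
	#include <stdio.h>

	struct sink { int id; };

	static bool counts_as_encoder(const struct sink *stream_sink,
				      const struct sink *first_remote_sink)
	{
		if (first_remote_sink)			/* MST topology present */
			return stream_sink->id == first_remote_sink->id;
		return true;				/* SST: every stream counts */
	}

	int main(void)
	{
		struct sink hub_first = { .id = 7 };
		struct sink display_a = { .id = 7 }, display_b = { .id = 9 };

		printf("%d %d\n",
		       counts_as_encoder(&display_a, &hub_first),	/* 1 */
		       counts_as_encoder(&display_b, &hub_first));	/* 0 */
		return 0;
	}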
@@ -275,6 +287,7 @@ static void populate_pipe_ctx_dlg_params_from_dml(struct pipe_ctx *pipe_ctx, str
void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state, struct dml2_context *in_ctx, unsigned int pipe_cnt)
{
unsigned int dc_pipe_ctx_index, dml_pipe_idx, plane_id;
+ enum mall_stream_type pipe_mall_type;
bool unbounded_req_enabled = false;
struct dml2_calculate_rq_and_dlg_params_scratch *s = &in_ctx->v20.scratch.calculate_rq_and_dlg_params_scratch;
@@ -322,7 +335,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
*/
populate_pipe_ctx_dlg_params_from_dml(&context->res_ctx.pipe_ctx[dc_pipe_ctx_index], &context->bw_ctx.dml2->v20.dml_core_ctx, dml_pipe_idx);
- if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ pipe_mall_type = dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[dc_pipe_ctx_index]);
+ if (pipe_mall_type == SUBVP_PHANTOM) {
// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb = 0;
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].unbounded_req = false;
@@ -349,7 +363,7 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state != context->res_ctx.pipe_ctx[dc_pipe_ctx_index].top_pipe->plane_state) &&
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].prev_odm_pipe == NULL) {
/* SS: all active surfaces stored in MALL */
- if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (pipe_mall_type != SUBVP_PHANTOM) {
context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[dc_pipe_ctx_index].surface_size_in_mall_bytes;
} else {
/* SUBVP: phantom surfaces only stored in MALL */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 8f231418870f..26307e599614 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -418,7 +418,7 @@ static int find_drr_eligible_stream(struct dc_state *display_state)
int i;
for (i = 0; i < display_state->stream_count; i++) {
- if (display_state->streams[i]->mall_stream_config.type == SUBVP_NONE
+ if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE
&& display_state->streams[i]->ignore_msa_timing_param) {
// Use ignore_msa_timing_param flag to identify as DRR
return i;
@@ -634,6 +634,8 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
+ // copy for deciding z-state use
+ context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod;
}
return result;
@@ -691,10 +693,15 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_v
return out;
}
+static inline struct dml2_context *dml2_allocate_memory(void)
+{
+ return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+}
+
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
// Allocate Mode Lib Ctx
- *dml2 = (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+ *dml2 = dml2_allocate_memory();
if (!(*dml2))
return false;
@@ -745,3 +752,25 @@ void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
*fclk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0];
*dram_clk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.DRAMClockChangeSupport[0];
}
+
+void dml2_copy(struct dml2_context *dst_dml2,
+ struct dml2_context *src_dml2)
+{
+ /* copy Mode Lib Ctx */
+ memcpy(dst_dml2, src_dml2, sizeof(struct dml2_context));
+}
+
+bool dml2_create_copy(struct dml2_context **dst_dml2,
+ struct dml2_context *src_dml2)
+{
+ /* Allocate Mode Lib Ctx */
+ *dst_dml2 = dml2_allocate_memory();
+
+ if (!(*dst_dml2))
+ return false;
+
+ /* copy Mode Lib Ctx */
+ dml2_copy(*dst_dml2, src_dml2);
+
+ return true;
+}
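dml2_create_copy pairs the allocation with a shallow memcpy of the whole context, so a caller can validate against a duplicate without disturbing the original. A hedged caller-side sketch (only the dml2_* signatures are from this hunk; the surrounding flow is illustrative):

	static bool validate_on_copy(struct dml2_context *cur_dml2)
	{
		struct dml2_context *dup = NULL;

		if (!dml2_create_copy(&dup, cur_dml2))
			return false;	/* allocation failed; cur_dml2 is untouched */

		/* ... run a trial validation against dup instead of cur_dml2 ... */

		dml2_destroy(dup);
		return true;
	}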
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index fe15baa4bf09..ee0eb184eb6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -93,15 +93,34 @@ struct dml2_dc_callbacks {
struct dml2_dc_svp_callbacks {
struct dc *dc;
bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx);
- struct dc_stream_state* (*create_stream_for_sink)(struct dc_sink *dc_sink_data);
- struct dc_plane_state* (*create_plane)(struct dc *dc);
- enum dc_status (*add_stream_to_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
- bool (*add_plane_to_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context);
- bool (*remove_plane_from_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context);
- enum dc_status (*remove_stream_from_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream);
- void (*plane_state_release)(struct dc_plane_state *plane_state);
- void (*stream_release)(struct dc_stream_state *stream);
+ struct dc_stream_state* (*create_phantom_stream)(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *main_stream);
+ struct dc_plane_state* (*create_phantom_plane)(struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *main_plane);
+ enum dc_status (*add_phantom_stream)(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream,
+ struct dc_stream_state *main_stream);
+ bool (*add_phantom_plane)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context);
+ bool (*remove_phantom_plane)(const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context);
+ enum dc_status (*remove_phantom_stream)(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+ void (*release_phantom_plane)(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *plane);
+ void (*release_phantom_stream)(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream);
void (*release_dsc)(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc);
+ enum mall_stream_type (*get_pipe_subvp_type)(const struct dc_state *state, const struct pipe_ctx *pipe_ctx);
+ enum mall_stream_type (*get_stream_subvp_type)(const struct dc_state *state, const struct dc_stream_state *stream);
+ struct dc_stream_state *(*get_paired_subvp_stream)(const struct dc_state *state, const struct dc_stream_state *stream);
};
struct dml2_clks_table_entry {
@@ -191,6 +210,10 @@ bool dml2_create(const struct dc *in_dc,
struct dml2_context **dml2);
void dml2_destroy(struct dml2_context *dml2);
+void dml2_copy(struct dml2_context *dst_dml2,
+ struct dml2_context *src_dml2);
+bool dml2_create_copy(struct dml2_context **dst_dml2,
+ struct dml2_context *src_dml2);
/*
* dml2_validate - Determines if a display configuration is supported or not.
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
index bccd46bd1815..254136f8e3f9 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
@@ -78,7 +78,7 @@ ifdef CONFIG_DRM_AMD_DC_FP
# DCN
###############################################################################
-HWSS_DCN10 = dcn10_hwseq.o
+HWSS_DCN10 = dcn10_hwseq.o dcn10_init.o
AMD_DAL_HWSS_DCN10 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn10/,$(HWSS_DCN10))
@@ -86,7 +86,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN10)
###############################################################################
-HWSS_DCN20 = dcn20_hwseq.o
+HWSS_DCN20 = dcn20_hwseq.o dcn20_init.o
AMD_DAL_HWSS_DCN20 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn20/,$(HWSS_DCN20))
@@ -94,7 +94,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN20)
###############################################################################
-HWSS_DCN201 = dcn201_hwseq.o
+HWSS_DCN201 = dcn201_hwseq.o dcn201_init.o
AMD_DAL_HWSS_DCN201 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn201/,$(HWSS_DCN201))
@@ -102,7 +102,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN201)
###############################################################################
-HWSS_DCN21 = dcn21_hwseq.o
+HWSS_DCN21 = dcn21_hwseq.o dcn21_init.o
AMD_DAL_HWSS_DCN21 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn21/,$(HWSS_DCN21))
@@ -114,7 +114,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN21)
###############################################################################
-HWSS_DCN30 = dcn30_hwseq.o
+HWSS_DCN30 = dcn30_hwseq.o dcn30_init.o
AMD_DAL_HWSS_DCN30 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn30/,$(HWSS_DCN30))
@@ -122,7 +122,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN30)
###############################################################################
-HWSS_DCN301 = dcn301_hwseq.o
+HWSS_DCN301 = dcn301_hwseq.o dcn301_init.o
AMD_DAL_HWSS_DCN301 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn301/,$(HWSS_DCN301))
@@ -130,15 +130,17 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN301)
###############################################################################
-HWSS_DCN302 = dcn302_hwseq.o
+HWSS_DCN302 = dcn302_hwseq.o dcn302_init.o
AMD_DAL_HWSS_DCN302 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn302/,$(HWSS_DCN302))
AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN302)
+
+
###############################################################################
-HWSS_DCN303 = dcn303_hwseq.o
+HWSS_DCN303 = dcn303_hwseq.o dcn303_init.o
AMD_DAL_HWSS_DCN303 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn303/,$(HWSS_DCN303))
@@ -146,7 +148,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN303)
###############################################################################
-HWSS_DCN31 = dcn31_hwseq.o
+HWSS_DCN31 = dcn31_hwseq.o dcn31_init.o
AMD_DAL_HWSS_DCN31 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn31/,$(HWSS_DCN31))
@@ -154,7 +156,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN31)
###############################################################################
-HWSS_DCN314 = dcn314_hwseq.o
+HWSS_DCN314 = dcn314_hwseq.o dcn314_init.o
AMD_DAL_HWSS_DCN314 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn314/,$(HWSS_DCN314))
@@ -162,7 +164,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN314)
###############################################################################
-HWSS_DCN32 = dcn32_hwseq.o
+HWSS_DCN32 = dcn32_hwseq.o dcn32_init.o
AMD_DAL_HWSS_DCN32 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn32/,$(HWSS_DCN32))
@@ -170,7 +172,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN32)
###############################################################################
-HWSS_DCN35 = dcn35_hwseq.o
+HWSS_DCN35 = dcn35_hwseq.o dcn35_init.o
AMD_DAL_HWSS_DCN35 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn35/,$(HWSS_DCN35))
@@ -180,4 +182,4 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN35)
###############################################################################
-endif
\ No newline at end of file
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 0a331d17ee92..5660f15da291 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -55,6 +55,7 @@
#include "audio.h"
#include "reg_helper.h"
#include "panel_cntl.h"
+#include "dc_state_priv.h"
#include "dpcd_defs.h"
/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
@@ -790,7 +791,7 @@ void dce110_edp_power_control(
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
enum bp_result bp_result;
- uint8_t panel_instance;
+ uint8_t pwrseq_instance;
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
@@ -873,7 +874,7 @@ void dce110_edp_power_control(
cntl.coherent = false;
cntl.lanes_number = LANE_COUNT_FOUR;
cntl.hpd_sel = link->link_enc->hpd_source;
- panel_instance = link->panel_cntl->inst;
+ pwrseq_instance = link->panel_cntl->pwrseq_inst;
if (ctx->dc->ctx->dmub_srv &&
ctx->dc->debug.dmub_command_table) {
@@ -881,11 +882,11 @@ void dce110_edp_power_control(
if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) {
bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_POWER_ON,
- panel_instance, link->link_powered_externally);
+ pwrseq_instance, link->link_powered_externally);
} else {
bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_POWER_OFF,
- panel_instance, link->link_powered_externally);
+ pwrseq_instance, link->link_powered_externally);
}
}
@@ -956,7 +957,7 @@ void dce110_edp_backlight_control(
{
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
- uint8_t panel_instance;
+ uint8_t pwrseq_instance;
unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
unsigned int post_T7_delay = OLED_POST_T7_DELAY;
@@ -1009,7 +1010,7 @@ void dce110_edp_backlight_control(
*/
/* dc_service_sleep_in_milliseconds(50); */
/*edp 1.2*/
- panel_instance = link->panel_cntl->inst;
+ pwrseq_instance = link->panel_cntl->pwrseq_inst;
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) {
if (!link->dc->config.edp_no_power_sequencing)
@@ -1034,11 +1035,11 @@ void dce110_edp_backlight_control(
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_LCD_BLON,
- panel_instance, link->link_powered_externally);
+ pwrseq_instance, link->link_powered_externally);
else
ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_LCD_BLOFF,
- panel_instance, link->link_powered_externally);
+ pwrseq_instance, link->link_powered_externally);
}
link_transmitter_control(ctx->dc_bios, &cntl);
@@ -1353,7 +1354,7 @@ static void build_audio_output(
if (state->clk_mgr &&
(pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
- audio_output->pll_info.dp_dto_source_clock_in_khz =
+ audio_output->pll_info.audio_dto_source_clock_in_khz =
state->clk_mgr->funcs->get_dp_ref_clk_frequency(
state->clk_mgr);
}
@@ -1596,7 +1597,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
* is constructed with the same sink). Make sure not to override
* and link programming on the main.
*/
- if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false;
pipe_ctx->stream->link->replay_settings.replay_feature_enabled = false;
}
@@ -1684,7 +1685,7 @@ static void disable_vga_and_power_gate_all_controllers(
true);
dc->current_state->res_ctx.pipe_ctx[i].pipe_idx = i;
- dc->hwss.disable_plane(dc,
+ dc->hwss.disable_plane(dc, dc->current_state,
&dc->current_state->res_ctx.pipe_ctx[i]);
}
}
@@ -2124,7 +2125,8 @@ static void dce110_reset_hw_ctx_wrap(
BREAK_TO_DEBUGGER();
}
pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg);
- pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0;
+ if (dc_is_hdmi_tmds_signal(pipe_ctx_old->stream->signal))
+ pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0;
pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
@@ -2133,7 +2135,7 @@ static void dce110_reset_hw_ctx_wrap(
old_clk))
old_clk->funcs->cs_power_down(old_clk);
- dc->hwss.disable_plane(dc, pipe_ctx_old);
+ dc->hwss.disable_plane(dc, dc->current_state, pipe_ctx_old);
pipe_ctx_old->stream = NULL;
}
@@ -2497,6 +2499,7 @@ static bool wait_for_reset_trigger_to_occur(
/* Enable timing synchronization for a group of Timing Generators. */
static void dce110_enable_timing_synchronization(
struct dc *dc,
+ struct dc_state *state,
int group_index,
int group_size,
struct pipe_ctx *grouped_pipes[])
@@ -2590,6 +2593,7 @@ static void init_hw(struct dc *dc)
struct dmcu *dmcu;
struct dce_hwseq *hws = dc->hwseq;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
bp = dc->ctx->dc_bios;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2639,13 +2643,15 @@ static void init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
abm = dc->res_pool->abm;
if (abm != NULL)
- abm->funcs->abm_init(abm, backlight);
+ abm->funcs->abm_init(abm, backlight, user_level);
dmcu = dc->res_pool->dmcu;
if (dmcu != NULL && abm != NULL)
@@ -2842,7 +2848,7 @@ static void dce110_post_unlock_program_front_end(
{
}
-static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
+static void dce110_power_down_fe(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
int fe_idx = pipe_ctx->plane_res.mi ?
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 2b8b8366538e..6dd479e8a348 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -56,6 +56,7 @@
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
#include "link.h"
+#include "dc_state_priv.h"
#define DC_LOGGER \
dc_logger
@@ -115,7 +116,7 @@ void dcn10_lock_all_pipes(struct dc *dc,
!pipe_ctx->stream ||
(!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
!tg->funcs->is_tg_enabled(tg) ||
- pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM)
continue;
if (lock)
@@ -1057,7 +1058,8 @@ static void dcn10_reset_back_end_for_pipe(
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, NULL);
- pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -1180,7 +1182,9 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc)
}
/* trigger HW to start disconnect plane from stream on the next vsync */
-void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_plane_atomic_disconnect(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -1200,7 +1204,7 @@ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
// so don't wait for MPCC_IDLE in the programming sequence
- if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
+ if (opp != NULL && dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
dc->optimized_required = true;
@@ -1290,7 +1294,7 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state = NULL;
}
-void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
@@ -1416,12 +1420,12 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
- hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->unlock(tg);
- dc->hwss.disable_plane(dc, pipe_ctx);
+ dc->hwss.disable_plane(dc, context, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
@@ -1486,6 +1490,7 @@ void dcn10_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
bool is_optimized_init_done = false;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
@@ -1583,12 +1588,14 @@ void dcn10_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
if (abm != NULL)
- abm->funcs->abm_init(abm, backlight);
+ abm->funcs->abm_init(abm, backlight, user_level);
if (dmcu != NULL && !dmcu->auto_load_dmcu)
dmcu->funcs->dmcu_init(dmcu);
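
abm_init() now receives both the hardware backlight level returned by the panel controller's hw_init() and the user-requested level restored from stored_backlight_registers.USER_LEVEL, with both falling back to MAX_BACKLIGHT_LEVEL when no panel controller exists. A condensed sketch of that default-then-override pattern (types simplified; the funcs indirection of the real panel_cntl is flattened here, and the MAX_BACKLIGHT_LEVEL value is a placeholder):

#include <stdint.h>

#define MAX_BACKLIGHT_LEVEL 255 /* placeholder value */

/* Simplified panel controller: the real one reaches hw_init through a
 * funcs table. */
struct panel_cntl {
	struct {
		uint32_t USER_LEVEL;
	} stored_backlight_registers;
	uint32_t (*hw_init)(struct panel_cntl *panel_cntl);
};

/* Compute the two levels handed to abm_init(): hardware defaults unless
 * a panel controller supplies restored values. */
static void get_abm_init_levels(struct panel_cntl *panel_cntl,
				uint32_t *backlight, uint32_t *user_level)
{
	*backlight = MAX_BACKLIGHT_LEVEL;
	*user_level = MAX_BACKLIGHT_LEVEL;

	if (panel_cntl) {
		*backlight = panel_cntl->hw_init(panel_cntl);
		*user_level = panel_cntl->stored_backlight_registers.USER_LEVEL;
	}
}
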
@@ -2262,6 +2269,7 @@ void dcn10_enable_vblanks_synchronization(
void dcn10_enable_timing_synchronization(
struct dc *dc,
+ struct dc_state *state,
int group_index,
int group_size,
struct pipe_ctx *grouped_pipes[])
@@ -2276,7 +2284,7 @@ void dcn10_enable_timing_synchronization(
DC_SYNC_INFO("Setting up OTG reset trigger\n");
for (i = 1; i < group_size; i++) {
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
opp = grouped_pipes[i]->stream_res.opp;
@@ -2296,14 +2304,14 @@ void dcn10_enable_timing_synchronization(
if (grouped_pipes[i]->stream == NULL)
continue;
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
grouped_pipes[i]->stream->vblank_synchronized = false;
}
for (i = 1; i < group_size; i++) {
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
@@ -2317,11 +2325,11 @@ void dcn10_enable_timing_synchronization(
* synchronized. Look at last pipe programmed to reset.
*/
- if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[1]) != SUBVP_PHANTOM)
wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
for (i = 1; i < group_size; i++) {
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
@@ -2329,7 +2337,7 @@ void dcn10_enable_timing_synchronization(
}
for (i = 1; i < group_size; i++) {
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
opp = grouped_pipes[i]->stream_res.opp;
@@ -3021,7 +3029,7 @@ void dcn10_post_unlock_program_front_end(
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
- dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
@@ -3417,7 +3425,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
.rotation = pipe_ctx->plane_state->rotation,
- .mirror = pipe_ctx->plane_state->horizontal_mirror
+ .mirror = pipe_ctx->plane_state->horizontal_mirror,
+ .stream = pipe_ctx->stream,
};
bool pipe_split_on = false;
bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
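
The dominant substitution in this file, and in the dcn20/dcn30/dcn32/dcn35 files below, replaces direct reads of stream->mall_stream_config.type with dc_state_get_pipe_subvp_type(state, pipe_ctx), moving SubVP classification behind the dc_state_priv.h interface so it is owned by a dc_state rather than by the stream. A plausible sketch of such an accessor, assuming the state tracks per-stream SubVP metadata next to its stream array (the real helper's internals may differ):

#define MAX_STREAMS 6 /* placeholder bound */

enum mall_stream_type {
	SUBVP_NONE,
	SUBVP_MAIN,
	SUBVP_PHANTOM,
};

struct dc_stream_state;

struct dc_stream_status {
	enum mall_stream_type mall_stream_type; /* assumed field */
};

struct dc_state {
	int stream_count;
	struct dc_stream_state *streams[MAX_STREAMS];
	struct dc_stream_status stream_status[MAX_STREAMS];
};

struct pipe_ctx {
	struct dc_stream_state *stream;
};

/* Classify the stream bound to a pipe within a given state; pipes with
 * no stream are SUBVP_NONE. */
static enum mall_stream_type
dc_state_get_pipe_subvp_type(const struct dc_state *state,
			     const struct pipe_ctx *pipe_ctx)
{
	int i;

	if (!pipe_ctx->stream)
		return SUBVP_NONE;

	for (i = 0; i < state->stream_count; i++)
		if (state->streams[i] == pipe_ctx->stream)
			return state->stream_status[i].mall_stream_type;

	return SUBVP_NONE;
}
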
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
index ef6d56da417c..bc5dd68a2408 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
@@ -75,7 +75,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn10_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context);
-void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void dcn10_lock_all_pipes(
struct dc *dc,
struct dc_state *context,
@@ -108,13 +108,16 @@ void dcn10_power_down_on_boot(struct dc *dc);
enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context);
-void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_plane_atomic_disconnect(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx);
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data);
void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx);
void dce110_power_down(struct dc *dc);
void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
void dcn10_enable_timing_synchronization(
struct dc *dc,
+ struct dc_state *state,
int group_index,
int group_size,
struct pipe_ctx *grouped_pipes[]);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
index a5bdac79a744..a5bdac79a744 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h
index 8c6fd7b844a4..8c6fd7b844a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 608221b0dd5d..e931342fcf4c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -55,6 +55,7 @@
#include "inc/link_enc_cfg.h"
#include "link_hwss.h"
#include "link.h"
+#include "dc_state_priv.h"
#define DC_LOGGER \
dc_logger
@@ -623,9 +624,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
}
-void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
- bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom;
+ bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
DC_LOGGER_INIT(dc->ctx->logger);
@@ -847,7 +848,7 @@ enum dc_status dcn20_enable_stream_timing(
/* TODO enable stream if timing changed */
/* TODO unblank stream if DP */
- if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
if (pipe_ctx->stream_res.tg && pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
}
@@ -1368,8 +1369,14 @@ void dcn20_pipe_control_lock(
}
}
-static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
+static void dcn20_detect_pipe_changes(struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe)
{
+ bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
+ bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;
+
new_pipe->update_flags.raw = 0;
/* If non-phantom pipe is being transitioned to a phantom pipe,
@@ -1379,8 +1386,8 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
* be different). The post_unlock sequence will set the correct
* update flags to enable the phantom pipe.
*/
- if (old_pipe->plane_state && !old_pipe->plane_state->is_phantom &&
- new_pipe->plane_state && new_pipe->plane_state->is_phantom) {
+ if (old_pipe->plane_state && !old_is_phantom &&
+ new_pipe->plane_state && new_is_phantom) {
new_pipe->update_flags.bits.disable = 1;
return;
}
@@ -1400,6 +1407,10 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
new_pipe->update_flags.bits.scaler = 1;
new_pipe->update_flags.bits.viewport = 1;
new_pipe->update_flags.bits.det_size = 1;
+ if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
+ new_pipe->stream_res.test_pattern_params.width != 0 &&
+ new_pipe->stream_res.test_pattern_params.height != 0)
+ new_pipe->update_flags.bits.test_pattern_changed = 1;
if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
new_pipe->update_flags.bits.odm = 1;
new_pipe->update_flags.bits.global_sync = 1;
@@ -1412,14 +1423,14 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
* The remove-add sequence of the phantom pipe always results in the pipe
* being blanked in enable_stream_timing (DPG).
*/
- if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
new_pipe->update_flags.bits.enable = 1;
/* Phantom pipes are effectively disabled, if the pipe was previously phantom
* we have to enable
*/
- if (old_pipe->plane_state && old_pipe->plane_state->is_phantom &&
- new_pipe->plane_state && !new_pipe->plane_state->is_phantom)
+ if (old_pipe->plane_state && old_is_phantom &&
+ new_pipe->plane_state && !new_is_phantom)
new_pipe->update_flags.bits.enable = 1;
if (old_pipe->plane_state && !new_pipe->plane_state) {
@@ -1556,6 +1567,7 @@ static void dcn20_update_dchubp_dpp(
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dccg *dccg = dc->res_pool->dccg;
bool viewport_changed = false;
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx);
if (pipe_ctx->update_flags.bits.dppclk)
dpp->funcs->dpp_dppclk_control(dpp, false, true);
@@ -1701,7 +1713,7 @@ static void dcn20_update_dchubp_dpp(
pipe_ctx->update_flags.bits.plane_changed ||
plane_state->update_flags.bits.addr_update) {
if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
- pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+ pipe_mall_type == SUBVP_MAIN) {
union block_sequence_params params;
params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
@@ -1715,7 +1727,7 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.enable)
hubp->funcs->set_blank(hubp, false);
/* If the stream paired with this plane is phantom, the plane is also phantom */
- if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM
+ if (pipe_ctx->stream && pipe_mall_type == SUBVP_PHANTOM
&& hubp->funcs->phantom_hubp_post_enable)
hubp->funcs->phantom_hubp_post_enable(hubp);
}
@@ -1773,7 +1785,7 @@ static void dcn20_program_pipe(
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
- if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
@@ -1877,6 +1889,8 @@ void dcn20_program_front_end_for_ctx(
int i;
struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
+ unsigned int prev_hubp_count = 0;
+ unsigned int hubp_count = 0;
if (resource_is_pipe_topology_changed(dc->current_state, context))
resource_log_pipe_topology_update(dc, context);
@@ -1894,9 +1908,23 @@ void dcn20_program_front_end_for_ctx(
}
}
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
+ prev_hubp_count++;
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ hubp_count++;
+ }
+
+ if (prev_hubp_count == 0 && hubp_count > 0) {
+ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, true, false);
+ udelay(500);
+ }
+
/* Set pipe update flags and lock pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
- dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
+ dcn20_detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
&context->res_ctx.pipe_ctx[i]);
/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
@@ -1906,15 +1934,16 @@ void dcn20_program_front_end_for_ctx(
struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
- dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
if (tg->funcs->enable_crtc) {
if (dc->hwss.blank_phantom) {
int main_pipe_width, main_pipe_height;
+ struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(dc->current_state, dc->current_state->res_ctx.pipe_ctx[i].stream);
- main_pipe_width = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.width;
- main_pipe_height = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.height;
+ main_pipe_width = phantom_stream->dst.width;
+ main_pipe_height = phantom_stream->dst.height;
dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
}
tg->funcs->enable_crtc(tg);
@@ -1943,9 +1972,9 @@ void dcn20_program_front_end_for_ctx(
* DET allocation.
*/
if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
- (context->res_ctx.pipe_ctx[i].plane_state && context->res_ctx.pipe_ctx[i].plane_state->is_phantom)))
+ (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM)))
hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
- hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
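
The hunk above also stops chasing mall_stream_config.paired_stream pointers: the stream paired with a SubVP stream is now fetched through dc_state_get_paired_subvp_stream(). (The local is named phantom_stream, but for the phantom pipe being disabled here the paired stream is the SubVP main stream, whose dst rect sizes the blanking.) A sketch of the pairing lookup, under the same assumed dc_state layout as the accessor sketch above, extended with a pairing field:

#include <stddef.h>

#define MAX_STREAMS 6 /* placeholder bound */

struct dc_stream_state;

struct dc_stream_status {
	struct dc_stream_state *paired_subvp_stream; /* assumed field */
};

struct dc_state {
	int stream_count;
	struct dc_stream_state *streams[MAX_STREAMS];
	struct dc_stream_status stream_status[MAX_STREAMS];
};

/* Return the SubVP partner of @stream in @state: the phantom stream for
 * a main stream, the main stream for a phantom one, NULL otherwise. */
static struct dc_stream_state *
dc_state_get_paired_subvp_stream(const struct dc_state *state,
				 const struct dc_stream_state *stream)
{
	int i;

	for (i = 0; i < state->stream_count; i++)
		if (state->streams[i] == stream)
			return state->stream_status[i].paired_subvp_stream;

	return NULL;
}
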
@@ -1968,7 +1997,7 @@ void dcn20_program_front_end_for_ctx(
* but the MPO still exists until the double buffered update of the main pipe so we
* will get a frame of underflow if the phantom pipe is programmed here.
*/
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
dcn20_program_pipe(dc, pipe, context);
}
@@ -2018,7 +2047,7 @@ void dcn20_post_unlock_program_front_end(
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
- dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
/*
* If we are enabling a pipe, we need to wait for pending clear as this is a critical
@@ -2030,7 +2059,7 @@ void dcn20_post_unlock_program_front_end(
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// Don't check flip pending on phantom pipes
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
- pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
@@ -2039,6 +2068,10 @@ void dcn20_post_unlock_program_front_end(
}
}
+ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, false, false);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -2049,7 +2082,7 @@ void dcn20_post_unlock_program_front_end(
* programming sequence).
*/
while (pipe) {
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
/* When turning on the phantom pipe we want to run through the
* entire enable sequence, so apply all the "enable" flags.
*/
@@ -2119,7 +2152,7 @@ void dcn20_prepare_bandwidth(
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// At optimize don't restore the original watermark value
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) {
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
break;
}
@@ -2163,7 +2196,7 @@ void dcn20_optimize_bandwidth(
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// At optimize don't need to restore the original watermark value
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) {
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
break;
}
@@ -2197,7 +2230,8 @@ void dcn20_optimize_bandwidth(
dc->clk_mgr,
context,
true);
- if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+ if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
+ !dc->debug.disable_extblankadj) {
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -2590,7 +2624,8 @@ static void dcn20_reset_back_end_for_pipe(
* the case where the same symclk is shared across multiple otg
* instances
*/
- link->phy_state.symclk_ref_cnts.otg = 0;
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ link->phy_state.symclk_ref_cnts.otg = 0;
if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
link_hwss->disable_link_output(link,
&pipe_ctx->link_res, pipe_ctx->stream->signal);
@@ -2923,7 +2958,7 @@ void dcn20_fpga_init_hw(struct dc *dc)
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
/*to do*/
- hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
}
/* initialize DWB pointer to MCIF_WB */
@@ -2940,7 +2975,7 @@ void dcn20_fpga_init_hw(struct dc *dc)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- dc->hwss.disable_plane(dc, pipe_ctx);
+ dc->hwss.disable_plane(dc, context, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
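
Two related additions in this file bracket front-end programming: dcn20_program_front_end_for_ctx() counts planes in the current and the new state and, on a transition from zero planes to one or more, sets the HUBBUB p-state change control override (force = true, allow = false) followed by a 500 us settle delay before pipes are locked; dcn20_post_unlock_program_front_end() then releases the override. A condensed sketch of the set-up half, with stand-in types and a stubbed udelay():

#include <stdbool.h>

struct hubbub;

struct hubbub_funcs {
	void (*force_pstate_change_control)(struct hubbub *hubbub,
					    bool force, bool allow);
};

struct hubbub {
	const struct hubbub_funcs *funcs;
};

/* Stub standing in for the kernel's udelay(). */
static void udelay(unsigned long usecs)
{
	(void)usecs;
}

/* On a 0 -> N plane transition, apply the p-state change control
 * override before the first front end is programmed; the 500 us delay
 * lets it take effect. The matching release happens in post_unlock. */
static void block_pstate_for_first_plane(struct hubbub *hubbub,
					 unsigned int prev_hubp_count,
					 unsigned int hubp_count)
{
	if (prev_hubp_count == 0 && hubp_count > 0) {
		if (hubbub->funcs->force_pstate_change_control)
			hubbub->funcs->force_pstate_change_control(
					hubbub, true, false);
		udelay(500);
	}
}
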
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
index ab02e4e9c8c2..b94c85340abf 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
@@ -52,7 +52,7 @@ void dcn20_program_output_csc(struct dc *dc,
void dcn20_enable_stream(struct pipe_ctx *pipe_ctx);
void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
-void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void dcn20_disable_pixel_data(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
index 884e3e323338..884e3e323338 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h
index 12277797cd71..12277797cd71 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
index d3fe6092f50e..d5769f38874f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
@@ -320,7 +320,7 @@ void dcn201_init_hw(struct dc *dc)
res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = res_pool->opps[i];
/*To do: number of MPCC != number of opp*/
- hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
}
/* initialize DWB pointer to MCIF_WB */
@@ -337,7 +337,7 @@ void dcn201_init_hw(struct dc *dc)
for (i = 0; i < res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- dc->hwss.disable_plane(dc, pipe_ctx);
+ dc->hwss.disable_plane(dc, context, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
@@ -369,7 +369,9 @@ void dcn201_init_hw(struct dc *dc)
}
/* trigger HW to start disconnect plane from stream on the next vsync */
-void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn201_plane_atomic_disconnect(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h
index 26cd62be6418..6a50a9894be6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h
@@ -33,7 +33,7 @@ void dcn201_init_hw(struct dc *dc);
void dcn201_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
-void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn201_plane_atomic_disconnect(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn201_set_cursor_attribute(struct pipe_ctx *pipe_ctx);
void dcn201_pipe_control_lock(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
index a13bf6c9386e..a13bf6c9386e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h
index 1168887b033d..1168887b033d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 467812cf3368..8e88dcaf88f5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -137,7 +137,8 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->dpms_off = true;
}
-static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
+static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst,
+ uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = abm->ctx;
@@ -147,12 +148,13 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t optio
cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.pwrseq_inst = pwrseq_inst;
cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option;
cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -171,7 +173,7 @@ static void dmub_abm_set_backlight(struct dc_context *dc, uint32_t backlight_pwm
cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst);
cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
@@ -179,7 +181,6 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
struct abm *abm = pipe_ctx->stream_res.abm;
uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
-
struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
if (dmcu) {
@@ -190,9 +191,13 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
if (abm && panel_cntl) {
if (abm->funcs && abm->funcs->set_pipe_ex) {
abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE,
- panel_cntl->inst);
+ panel_cntl->inst, panel_cntl->pwrseq_inst);
} else {
- dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst);
+ dmub_abm_set_pipe(abm,
+ otg_inst,
+ SET_ABM_PIPE_IMMEDIATELY_DISABLE,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
}
panel_cntl->funcs->store_backlight_level(panel_cntl);
}
@@ -212,9 +217,16 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
if (abm && panel_cntl) {
if (abm->funcs && abm->funcs->set_pipe_ex) {
- abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ abm->funcs->set_pipe_ex(abm,
+ otg_inst,
+ SET_ABM_PIPE_NORMAL,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
} else {
- dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ dmub_abm_set_pipe(abm, otg_inst,
+ SET_ABM_PIPE_NORMAL,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
}
}
}
@@ -237,9 +249,17 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
if (abm && panel_cntl) {
if (abm->funcs && abm->funcs->set_pipe_ex) {
- abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ abm->funcs->set_pipe_ex(abm,
+ otg_inst,
+ SET_ABM_PIPE_NORMAL,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
} else {
- dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ dmub_abm_set_pipe(abm,
+ otg_inst,
+ SET_ABM_PIPE_NORMAL,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
}
}
}
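
Two threads run through the dcn21 hunks: the DMUB ABM set-pipe command gains a pwrseq_inst field so firmware can address the correct backlight power sequencer, and dm_execute_dmub_cmd() calls become dc_wake_and_execute_dmub_cmd(), which brings DMCUB out of its idle power state before queueing work. A sketch of that wake-then-execute shape (the exit/allow-idle helpers below are stand-ins; the real wrapper lives in dc_dmub_srv.c):

#include <stdbool.h>

union dmub_rb_cmd;
struct dc_context;

enum dm_dmub_wait_type {
	DM_DMUB_WAIT_TYPE_NO_WAIT,
	DM_DMUB_WAIT_TYPE_WAIT,
};

/* Stand-ins for the existing DC primitives. */
static void dc_dmub_srv_exit_idle(const struct dc_context *ctx)
{
	(void)ctx; /* would take DMCUB out of its idle/IPS state here */
}

static void dc_dmub_srv_allow_idle(const struct dc_context *ctx)
{
	(void)ctx; /* would re-allow DMCUB idle here */
}

static bool dm_execute_dmub_cmd(const struct dc_context *ctx,
				union dmub_rb_cmd *cmd,
				enum dm_dmub_wait_type wait_type)
{
	(void)ctx; (void)cmd; (void)wait_type;
	return true;
}

/* Wake DMCUB before executing so commands are not issued to firmware
 * sitting in a low-power state, then let it idle again. */
static bool wake_and_execute_dmub_cmd(const struct dc_context *ctx,
				      union dmub_rb_cmd *cmd,
				      enum dm_dmub_wait_type wait_type)
{
	bool ok;

	dc_dmub_srv_exit_idle(ctx);
	ok = dm_execute_dmub_cmd(ctx, cmd, wait_type);
	dc_dmub_srv_allow_idle(ctx);
	return ok;
}
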
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
index 18249c6b6d81..18249c6b6d81 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h
index 3ed24292648a..3ed24292648a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index d71faf2ecd41..c34c13e1e0a4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -51,7 +51,7 @@
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_resource.h"
#include "link.h"
-
+#include "dc_state_priv.h"
@@ -367,6 +367,10 @@ void dcn30_enable_writeback(
DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
__func__, wb_info->dwb_pipe_inst,\
wb_info->mpcc_inst);
+
+ /* Warmup interface */
+ dcn30_mmhubbub_warmup(dc, 1, wb_info);
+
/* Update writeback pipe */
dcn30_set_writeback(dc, wb_info, context);
@@ -472,6 +476,7 @@ void dcn30_init_hw(struct dc *dc)
int i;
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -608,13 +613,15 @@ void dcn30_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (abms[i] != NULL)
- abms[i]->funcs->abm_init(abms[i], backlight);
+ abms[i]->funcs->abm_init(abms[i], backlight, user_level);
}
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
@@ -750,7 +757,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ;
cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
return true;
}
@@ -872,7 +879,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.mall.cursor_height = cursor_attr.height;
cmd.mall.cursor_pitch = cursor_attr.pitch;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
/* Use copied cursor, and it's okay to not switch back */
cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
@@ -888,7 +895,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.mall.tmr_scale = tmr_scale;
cmd.mall.debug_bits = dc->debug.mall_error_as_fatal;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
return true;
}
@@ -905,7 +912,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.mall.header.payload_bytes =
sizeof(cmd.mall) - sizeof(cmd.mall.header);
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -962,7 +969,7 @@ void dcn30_hardware_release(struct dc *dc)
if (!pipe->stream)
continue;
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_MAIN) {
subvp_in_use = true;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 9894caedffed..9894caedffed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h
index c280ff90bfa3..c280ff90bfa3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
index 6477009ce065..6477009ce065 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h
index 0bca48ccbfa2..0bca48ccbfa2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c
index 637f9514d37b..637f9514d37b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h
index 899587b93aa1..899587b93aa1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c
index edb4d68b8187..edb4d68b8187 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h
index 4949981126d7..4949981126d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 97798cee876e..7423880fabb6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -96,7 +96,8 @@ static void enable_memory_low_power(struct dc *dc)
if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
// Power down VPGs
for (i = 0; i < dc->res_pool->stream_enc_count; i++)
- dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
+ if (dc->res_pool->stream_enc[i]->vpg)
+ dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
#if defined(CONFIG_DRM_AMD_DC_FP)
for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
@@ -112,6 +113,7 @@ void dcn31_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
int i;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
@@ -223,13 +225,15 @@ void dcn31_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (abms[i] != NULL)
- abms[i]->funcs->abm_init(abms[i], backlight);
+ abms[i]->funcs->abm_init(abms[i], backlight, user_level);
}
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
@@ -415,7 +419,7 @@ void dcn31_z10_save_init(struct dc *dc)
cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dcn31_z10_restore(const struct dc *dc)
@@ -433,7 +437,7 @@ void dcn31_z10_restore(const struct dc *dc)
cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_RESTORE;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
@@ -523,7 +527,8 @@ static void dcn31_reset_back_end_for_pipe(
if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 669f524bd064..669f524bd064 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h
index a3db08c8bd35..a3db08c8bd35 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index ccb7e317e86a..ccb7e317e86a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h
index 8f92e66577cf..8f92e66577cf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 5f7f474ef51c..6c9299c7683d 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -51,6 +51,7 @@
#include "dcn32/dcn32_resource.h"
#include "link.h"
#include "../dcn20/dcn20_hwseq.h"
+#include "dc_state_priv.h"
#define DC_LOGGER_INIT(logger)
@@ -277,7 +278,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
return true;
}
@@ -311,7 +312,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
cmd.cab.cab_alloc_ways = (uint8_t)ways;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
return true;
}
@@ -327,7 +328,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.cab.header.payload_bytes =
sizeof(cmd.cab) - sizeof(cmd.cab.header);
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -348,8 +349,7 @@ void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.paired_stream &&
- pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) {
// There is at least 1 SubVP pipe, so enable SubVP
enable_subvp = true;
break;
@@ -375,18 +375,20 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc,
bool subvp_immediate_flip = false;
bool subvp_in_use = false;
struct pipe_ctx *pipe;
+ enum mall_stream_type pipe_mall_type = SUBVP_NONE;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
+ pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
- if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN) {
subvp_in_use = true;
break;
}
}
if (top_pipe_to_program && top_pipe_to_program->stream && top_pipe_to_program->plane_state) {
- if (top_pipe_to_program->stream->mall_stream_config.type == SUBVP_MAIN &&
+ if (dc_state_get_pipe_subvp_type(context, top_pipe_to_program) == SUBVP_MAIN &&
top_pipe_to_program->plane_state->flip_immediate)
subvp_immediate_flip = true;
}
@@ -398,7 +400,7 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc,
if (!lock) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN &&
+ if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN &&
should_lock_all_pipes)
pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
}
@@ -416,14 +418,7 @@ void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params)
{
struct dc *dc = params->subvp_pipe_control_lock_fast_params.dc;
bool lock = params->subvp_pipe_control_lock_fast_params.lock;
- struct pipe_ctx *pipe_ctx = params->subvp_pipe_control_lock_fast_params.pipe_ctx;
- bool subvp_immediate_flip = false;
-
- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->plane_state) {
- if (pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN &&
- pipe_ctx->plane_state->flip_immediate)
- subvp_immediate_flip = true;
- }
+ bool subvp_immediate_flip = params->subvp_pipe_control_lock_fast_params.subvp_immediate_flip;
// Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared.
if (subvp_immediate_flip) {
@@ -609,7 +604,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct hubp *hubp = pipe->plane_res.hubp;
- if (!pipe->stream || !(pipe->stream->mall_stream_config.type == SUBVP_MAIN ||
+ if (!pipe->stream || !(dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN ||
pipe->stream->fpo_in_use)) {
if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
hubp->funcs->hubp_update_force_pstate_disallow(hubp, false);
@@ -624,7 +619,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct hubp *hubp = pipe->plane_res.hubp;
- if (pipe->stream && (pipe->stream->mall_stream_config.type == SUBVP_MAIN ||
+ if (pipe->stream && (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN ||
pipe->stream->fpo_in_use)) {
if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
hubp->funcs->hubp_update_force_pstate_disallow(hubp, true);
@@ -671,8 +666,8 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
if (cursor_size > 16384)
cache_cursor = true;
- if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
} else {
// MALL not supported with Stereo3D
hubp->funcs->hubp_update_mall_sel(hubp,
@@ -714,9 +709,8 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context)
* see if CURSOR_REQ_MODE will be back to 1 for SubVP
* when it should be 0 for MPO
*/
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
hubp->funcs->hubp_prepare_subvp_buffering(hubp, true);
- }
}
}
}
@@ -759,6 +753,7 @@ void dcn32_init_hw(struct dc *dc)
int i;
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -913,13 +908,15 @@ void dcn32_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (abms[i] != NULL && abms[i]->funcs != NULL)
- abms[i]->funcs->abm_init(abms[i], backlight);
+ abms[i]->funcs->abm_init(abms[i], backlight, user_level);
}
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
@@ -960,6 +957,12 @@ void dcn32_init_hw(struct dc *dc)
dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+
+ if (dc->ctx->dmub_srv->dmub->fw_version <
+ DMUB_FW_VERSION(7, 0, 35)) {
+ dc->debug.force_disable_subvp = true;
+ dc->debug.disable_fpo_optimizations = true;
+ }
}
}
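
dcn32_init_hw() now force-disables SubVP and the FPO optimizations when the detected DMUB firmware predates 7.0.35. DMUB_FW_VERSION packs the version fields into a single comparable integer; a sketch of the gate under an assumed one-byte-per-field encoding (the authoritative macro lives in the DMUB headers and may reserve low bits for a build field):

#include <stdbool.h>
#include <stdint.h>

/* Assumed packing: one byte per field, most significant first. */
#define DMUB_FW_VERSION_SKETCH(major, minor, rev)	\
	((((uint32_t)(major) & 0xff) << 24) |		\
	 (((uint32_t)(minor) & 0xff) << 16) |		\
	 (((uint32_t)(rev) & 0xff) << 8))

static bool fw_requires_subvp_fallback(uint32_t fw_version)
{
	/* On older firmware, force_disable_subvp and
	 * disable_fpo_optimizations both get set. */
	return fw_version < DMUB_FW_VERSION_SKETCH(7, 0, 35);
}
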
@@ -989,9 +992,22 @@ static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
+ struct dccg *dccg = dc->res_pool->dccg;
+ /* It has been found that when DSCCLK is lower than 16 MHz, DCN register
+ * accesses can hang. When DSCCLK is based on refclk, DSCCLK is always a
+ * fixed value above 16 MHz, so the issue does not occur. When DSCCLK is
+ * generated by the DTO, DSCCLK is based on 1/3 dispclk. For small timings
+ * with DSC, such as 480p60Hz, dispclk can be low enough to trigger this
+ * problem. As a workaround, keep using a refclk-based DSCCLK whenever the
+ * pixel clock is below 3 x 16 MHz (i.e. 48 MHz).
+ */
+ bool should_use_dto_dscclk = (dccg->funcs->set_dto_dscclk != NULL) &&
+ stream->timing.pix_clk_100hz > 480000;
ASSERT(dsc);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
@@ -1014,12 +1030,16 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
+ if (should_use_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
ASSERT(odm_dsc);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
+ if (should_use_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
}
dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
dsc_cfg.pic_width *= opp_cnt;
@@ -1039,9 +1059,13 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
OPTC_DSC_DISABLED, 0, 0);
/* disable DSC block */
+ if (dccg->funcs->set_ref_dscclk)
+ dccg->funcs->set_ref_dscclk(dccg, pipe_ctx->stream_res.dsc->inst);
dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
ASSERT(odm_pipe->stream_res.dsc);
+ if (dccg->funcs->set_ref_dscclk)
+ dccg->funcs->set_ref_dscclk(dccg, odm_pipe->stream_res.dsc->inst);
odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
}
}
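
The DSC hunks above implement the low-DSCCLK workaround end to end: the DTO-generated DSCCLK is 1/3 of dispclk, so the DTO source is only selected for streams above 48 MHz pixel clock, and the fixed refclk source is restored before each dsc_disable(). Since pix_clk_100hz counts in units of 100 Hz, comparing against 480000 is exactly the 3 x 16 MHz bound. A worked check of that conversion (the helper name and the dispclk ~ pixel clock approximation in the comments are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* pix_clk_100hz is in units of 100 Hz: 480000 * 100 Hz = 48 MHz, i.e.
 * 3 x 16 MHz, matching the comparison in update_dsc_on_stream(). */
static bool pixel_clock_allows_dto_dscclk(uint32_t pix_clk_100hz)
{
	return pix_clk_100hz > 480000;
}

int main(void)
{
	/* 480p60: 27.0 MHz pixel clock. Taking dispclk ~ pixel clock for
	 * such small timings, DTO DSCCLK would be 27/3 = 9 MHz < 16 MHz,
	 * so the refclk-based DSCCLK must be kept. */
	assert(!pixel_clock_allows_dto_dscclk(270000));

	/* 1080p60: 148.5 MHz -> 148.5/3 = 49.5 MHz, safely above 16 MHz. */
	assert(pixel_clock_allows_dto_dscclk(1485000));

	return 0;
}
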
@@ -1124,6 +1148,10 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
+ struct dccg *dccg = dc->res_pool->dccg;
+
+ if (dccg->funcs->set_ref_dscclk)
+ dccg->funcs->set_ref_dscclk(dccg, dsc->inst);
/* disconnect DSC block from stream */
dsc->funcs->dsc_disconnect(dsc);
}
@@ -1197,7 +1225,7 @@ void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_
continue;
if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))
- && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ && dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_PHANTOM) {
pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
otg_disabled[i] = true;
@@ -1348,8 +1376,8 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN &&
- pipe->stream->mall_stream_config.paired_stream == phantom_pipe->stream) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN &&
+ dc_state_get_paired_subvp_stream(context, pipe->stream) == phantom_pipe->stream) {
if (pipe->plane_state && pipe->plane_state->update_flags.bits.position_change) {
phantom_plane->src_rect.x = pipe->plane_state->src_rect.x;
@@ -1374,21 +1402,19 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe)
{
phantom_pipe->update_flags.raw = 0;
- if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) {
- phantom_pipe->update_flags.bits.enable = 1;
- phantom_pipe->update_flags.bits.mpcc = 1;
- phantom_pipe->update_flags.bits.dppclk = 1;
- phantom_pipe->update_flags.bits.hubp_interdependent = 1;
- phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
- phantom_pipe->update_flags.bits.gamut_remap = 1;
- phantom_pipe->update_flags.bits.scaler = 1;
- phantom_pipe->update_flags.bits.viewport = 1;
- phantom_pipe->update_flags.bits.det_size = 1;
- if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) {
- phantom_pipe->update_flags.bits.odm = 1;
- phantom_pipe->update_flags.bits.global_sync = 1;
- }
+ if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) {
+ phantom_pipe->update_flags.bits.enable = 1;
+ phantom_pipe->update_flags.bits.mpcc = 1;
+ phantom_pipe->update_flags.bits.dppclk = 1;
+ phantom_pipe->update_flags.bits.hubp_interdependent = 1;
+ phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ phantom_pipe->update_flags.bits.gamut_remap = 1;
+ phantom_pipe->update_flags.bits.scaler = 1;
+ phantom_pipe->update_flags.bits.viewport = 1;
+ phantom_pipe->update_flags.bits.det_size = 1;
+ if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) {
+ phantom_pipe->update_flags.bits.odm = 1;
+ phantom_pipe->update_flags.bits.global_sync = 1;
}
}
}
@@ -1460,8 +1486,8 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
* pipe, wait for the double buffer update to complete first before we do
* ANY phantom pipe programming.
*/
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM &&
- old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM &&
+ old_pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) != SUBVP_PHANTOM) {
old_pipe->stream_res.tg->funcs->wait_for_state(
old_pipe->stream_res.tg,
CRTC_STATE_VBLANK);
@@ -1473,7 +1499,7 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
- if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (new_pipe->stream && dc_state_get_pipe_subvp_type(context, new_pipe) == SUBVP_PHANTOM) {
// If old context or new context has phantom pipes, apply
// the phantom timings now. We can't change the phantom
// pipe configuration safely without driver acquiring
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 427cfc8c24a4..427cfc8c24a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h
index 89a591eb2c23..89a591eb2c23 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 39260371beb9..9c806385ecbd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -56,6 +56,7 @@
#include "dcn30/dcn30_cm_common.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn20/dcn20_hwseq.h"
+#include "dc_state_priv.h"
#define DC_LOGGER_INIT(logger) \
struct dal_logger *dc_logger = logger
@@ -133,6 +134,7 @@ void dcn35_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
int i;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
@@ -279,13 +281,15 @@ void dcn35_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
if (dc->ctx->dmub_srv) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (abms[i] != NULL && abms[i]->funcs != NULL)
- abms[i]->funcs->abm_init(abms[i], backlight);
+ abms[i]->funcs->abm_init(abms[i], backlight, user_level);
}
}
@@ -687,11 +691,7 @@ bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
}
// TODO: review other cases when idle optimization is allowed
-
- if (!enable)
- dc_dmub_srv_exit_low_power_state(dc);
- else
- dc_dmub_srv_notify_idle(dc, enable);
+ dc_dmub_srv_apply_idle_power_optimizations(dc, enable);
return true;
}
@@ -701,7 +701,7 @@ void dcn35_z10_restore(const struct dc *dc)
if (dc->debug.disable_z10)
return;
- dc_dmub_srv_exit_low_power_state(dc);
+ dc_dmub_srv_apply_idle_power_optimizations(dc, false);
dcn31_z10_restore(dc);
}
@@ -817,12 +817,12 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
- hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->unlock(tg);
- dc->hwss.disable_plane(dc, pipe_ctx);
+ dc->hwss.disable_plane(dc, context, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
@@ -949,10 +949,10 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state = NULL;
}
-void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
- bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom;
+ bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
DC_LOGGER_INIT(dc->ctx->logger);
@@ -979,6 +979,8 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
bool hpo_frl_stream_enc_acquired = false;
bool hpo_dp_stream_enc_acquired = false;
int i = 0, j = 0;
+ int edp_num = 0;
+ struct dc_link *edp_links[MAX_NUM_EDP] = { NULL };
memset(update_state, 0, sizeof(struct pg_block_update));
@@ -1019,10 +1021,24 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
+ }
+ /* Domain24 controls all the OTG, MPC and OPP blocks; as long as one OTG is still up, avoid enabling OTG PG */
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+ if (tg && tg->funcs->is_tg_enabled(tg)) {
+ update_state->pg_pipe_res_update[PG_OPTC][i] = false;
+ break;
+ }
+ }
- if (pipe_ctx->stream_res.tg)
- update_state->pg_pipe_res_update[PG_OPTC][pipe_ctx->stream_res.tg->inst] = false;
+ dc_get_edp_links(dc, edp_links, &edp_num);
+ if (edp_num == 0 ||
+ ((!edp_links[0] || !edp_links[0]->edp_sink_present) &&
+ (!edp_links[1] || !edp_links[1]->edp_sink_present))) {
+ /* No eDP on this config; keep Domain24 powered on. For S0i3 this is handled in DMUB fw */
+ update_state->pg_pipe_res_update[PG_OPTC][0] = false;
}
+
}
void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
@@ -1108,8 +1124,29 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
}
-void dcn35_block_power_control(struct dc *dc,
- struct pg_block_update *update_state, bool power_on)
+/**
+ * dcn35_hw_block_power_down() - power down sequence
+ *
+ * The following sequence describes the ON-OFF (ONO) for power down:
+ *
+ * ONO Region 3, DCPG 25: hpo - SKIPPED
+ * ONO Region 4, DCPG 0: dchubp0, dpp0
+ * ONO Region 6, DCPG 1: dchubp1, dpp1
+ * ONO Region 8, DCPG 2: dchubp2, dpp2
+ * ONO Region 10, DCPG 3: dchubp3, dpp3
+ * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power down at IPS2 entry
+ * ONO Region 5, DCPG 16: dsc0
+ * ONO Region 7, DCPG 17: dsc1
+ * ONO Region 9, DCPG 18: dsc2
+ * ONO Region 11, DCPG 19: dsc3
+ * ONO Region 2, DCPG 24: mpc opp optc dwb
+ * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. Will be powered down after the LONO timer is armed
+ *
+ * @dc: Current DC state
+ * @update_state: update PG sequence states for HW block
+ */
+void dcn35_hw_block_power_down(struct dc *dc,
+ struct pg_block_update *update_state)
{
int i = 0;
struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
@@ -1118,64 +1155,106 @@ void dcn35_block_power_control(struct dc *dc,
return;
if (dc->debug.ignore_pg)
return;
+
if (update_state->pg_res_update[PG_HPO]) {
if (pg_cntl->funcs->hpo_pg_control)
- pg_cntl->funcs->hpo_pg_control(pg_cntl, power_on);
+ pg_cntl->funcs->hpo_pg_control(pg_cntl, false);
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
if (pg_cntl->funcs->hubp_dpp_pg_control)
- pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, power_on);
+ pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
}
-
+ }
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
if (update_state->pg_pipe_res_update[PG_DSC][i]) {
if (pg_cntl->funcs->dsc_pg_control)
- pg_cntl->funcs->dsc_pg_control(pg_cntl, i, power_on);
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
}
- if (update_state->pg_pipe_res_update[PG_MPCC][i]) {
- if (pg_cntl->funcs->mpcc_pg_control)
- pg_cntl->funcs->mpcc_pg_control(pg_cntl, i, power_on);
- }
-
- if (update_state->pg_pipe_res_update[PG_OPP][i]) {
- if (pg_cntl->funcs->opp_pg_control)
- pg_cntl->funcs->opp_pg_control(pg_cntl, i, power_on);
- }
- if (update_state->pg_pipe_res_update[PG_OPTC][i]) {
- if (pg_cntl->funcs->optc_pg_control)
- pg_cntl->funcs->optc_pg_control(pg_cntl, i, power_on);
- }
- }
+ /* This needs all clients to unregister their OPTC interrupts; let DMUB fw handle this */
+ if (pg_cntl->funcs->plane_otg_pg_control)
+ pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false);
- if (update_state->pg_res_update[PG_DWB]) {
- if (pg_cntl->funcs->dwb_pg_control)
- pg_cntl->funcs->dwb_pg_control(pg_cntl, power_on);
- }
+ //domain22, 23, 25 currently always on.
- if (pg_cntl->funcs->plane_otg_pg_control)
- pg_cntl->funcs->plane_otg_pg_control(pg_cntl, power_on);
}
-void dcn35_root_clock_control(struct dc *dc,
- struct pg_block_update *update_state, bool power_on)
+/**
+ * dcn35_hw_block_power_up() - power up sequence
+ *
+ * The following sequence describes the ON-OFF (ONO) for power up:
+ *
+ * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
+ * ONO Region 2, DCPG 24: mpc opp optc dwb
+ * ONO Region 5, DCPG 16: dsc0
+ * ONO Region 7, DCPG 17: dsc1
+ * ONO Region 9, DCPG 18: dsc2
+ * ONO Region 11, DCPG 19: dsc3
+ * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
+ * ONO Region 4, DCPG 0: dchubp0, dpp0
+ * ONO Region 6, DCPG 1: dchubp1, dpp1
+ * ONO Region 8, DCPG 2: dchubp2, dpp2
+ * ONO Region 10, DCPG 3: dchubp3, dpp3
+ * ONO Region 3, DCPG 25: hpo - SKIPPED
+ *
+ * @dc: Current DC state
+ * @update_state: update PG sequence states for HW block
+ */
+void dcn35_hw_block_power_up(struct dc *dc,
+ struct pg_block_update *update_state)
{
int i = 0;
struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
if (!pg_cntl)
return;
+ if (dc->debug.ignore_pg)
+ return;
+ //domain22, 23, 25 currently always on.
+ /* This needs all clients to unregister their OPTC interrupts; let DMUB fw handle this */
+ if (pg_cntl->funcs->plane_otg_pg_control)
+ pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);
+
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
+ if (update_state->pg_pipe_res_update[PG_DSC][i]) {
+ if (pg_cntl->funcs->dsc_pg_control)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
+ }
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
- if (dc->hwseq->funcs.dpp_root_clock_control)
- dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
+ if (pg_cntl->funcs->hubp_dpp_pg_control)
+ pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
}
+ }
+ if (update_state->pg_res_update[PG_HPO]) {
+ if (pg_cntl->funcs->hpo_pg_control)
+ pg_cntl->funcs->hpo_pg_control(pg_cntl, true);
+ }
+}
+void dcn35_root_clock_control(struct dc *dc,
+ struct pg_block_update *update_state, bool power_on)
+{
+ int i = 0;
+ struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
+ if (!pg_cntl)
+ return;
+ /* enable root clocks first when powering up */
+ if (power_on)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ if (dc->hwseq->funcs.dpp_root_clock_control)
+ dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
+ }
+ }
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
if (update_state->pg_pipe_res_update[PG_DSC][i]) {
if (power_on) {
if (dc->res_pool->dccg->funcs->enable_dsc)
@@ -1186,6 +1265,15 @@ void dcn35_root_clock_control(struct dc *dc,
}
}
}
+ /* disable root clocks first when powering down */
+ if (!power_on)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ if (dc->hwseq->funcs.dpp_root_clock_control)
+ dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
+ }
+ }
}
void dcn35_prepare_bandwidth(
@@ -1199,9 +1287,9 @@ void dcn35_prepare_bandwidth(
if (dc->hwss.root_clock_control)
dc->hwss.root_clock_control(dc, &pg_update_state, true);
-
- if (dc->hwss.block_power_control)
- dc->hwss.block_power_control(dc, &pg_update_state, true);
+ /* power up required HW blocks */
+ if (dc->hwss.hw_block_power_up)
+ dc->hwss.hw_block_power_up(dc, &pg_update_state);
}
dcn20_prepare_bandwidth(dc, context);
@@ -1217,9 +1305,9 @@ void dcn35_optimize_bandwidth(
if (dc->hwss.calc_blocks_to_gate) {
dc->hwss.calc_blocks_to_gate(dc, context, &pg_update_state);
-
- if (dc->hwss.block_power_control)
- dc->hwss.block_power_control(dc, &pg_update_state, false);
+ /* try to power down unused blocks */
+ if (dc->hwss.hw_block_power_down)
+ dc->hwss.hw_block_power_down(dc, &pg_update_state);
if (dc->hwss.root_clock_control)
dc->hwss.root_clock_control(dc, &pg_update_state, false);
@@ -1241,3 +1329,44 @@ uint32_t dcn35_get_idle_state(const struct dc *dc)
return 0;
}
+
+void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, struct dc_crtc_timing_adjust adjust)
+{
+ int i = 0;
+ struct drr_params params = {0};
+ // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
+ unsigned int event_triggers = 0x800;
+ // Note: DRR trigger events are generated regardless of whether the num_frames count is met.
+ unsigned int num_frames = 2;
+
+ params.vertical_total_max = adjust.v_total_max;
+ params.vertical_total_min = adjust.v_total_min;
+ params.vertical_total_mid = adjust.v_total_mid;
+ params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
+
+ for (i = 0; i < num_pipes; i++) {
+ if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
+ struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
+ struct dc *dc = pipe_ctx[i]->stream->ctx->dc;
+
+ if (dc->debug.static_screen_wait_frames) {
+ unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total);
+
+ if (frame_rate >= 120 && dc->caps.ips_support &&
+ dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
+ /*ips enable case*/
+ num_frames = 2 * (frame_rate % 60);
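+ /* e.g. an IPS-capable 144 Hz panel: num_frames = 2 * (144 % 60) = 48 */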
+ }
+ }
+ if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
+ pipe_ctx[i]->stream_res.tg->funcs->set_drr(
+ pipe_ctx[i]->stream_res.tg, &params);
+ if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
+ if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
+ pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
+ pipe_ctx[i]->stream_res.tg,
+ event_triggers, num_frames);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index 0dff10d179b8..fd66316e33de 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -57,14 +57,16 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context);
void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context);
-void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
struct pg_block_update *update_state);
void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
struct pg_block_update *update_state);
-void dcn35_block_power_control(struct dc *dc,
- struct pg_block_update *update_state, bool power_on);
+void dcn35_hw_block_power_up(struct dc *dc,
+ struct pg_block_update *update_state);
+void dcn35_hw_block_power_down(struct dc *dc,
+ struct pg_block_update *update_state);
void dcn35_root_clock_control(struct dc *dc,
struct pg_block_update *update_state, bool power_on);
@@ -84,4 +86,8 @@ void dcn35_dsc_pg_control(
void dcn35_set_idle_state(const struct dc *dc, bool allow_idle);
uint32_t dcn35_get_idle_state(const struct dc *dc);
+
+void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, struct dc_crtc_timing_adjust adjust);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index 296bf3a38cb9..a630aa77dcec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -68,7 +68,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.prepare_bandwidth = dcn35_prepare_bandwidth,
.optimize_bandwidth = dcn35_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
- .set_drr = dcn10_set_drr,
+ .set_drr = dcn35_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
@@ -118,7 +118,8 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.update_dsc_pg = dcn32_update_dsc_pg,
.calc_blocks_to_gate = dcn35_calc_blocks_to_gate,
.calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
- .block_power_control = dcn35_block_power_control,
+ .hw_block_power_up = dcn35_hw_block_power_up,
+ .hw_block_power_down = dcn35_hw_block_power_down,
.root_clock_control = dcn35_root_clock_control,
.set_idle_state = dcn35_set_idle_state,
.get_idle_state = dcn35_get_idle_state
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h
index b67015032c35..b67015032c35 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt
new file mode 100644
index 000000000000..951ca2da4486
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+ dcn351_init.c
+ dcn351_init.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
new file mode 100644
index 000000000000..b24ad27fe6ef
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
@@ -0,0 +1,17 @@
+#
+# (c) Copyright 2022 Advanced Micro Devices, Inc. All the rights reserved
+#
+# All rights reserved. This notice is intended as a precaution against
+# inadvertent publication and does not imply publication or any waiver
+# of confidentiality. The year included in the foregoing notice is the
+# year of creation of the work.
+#
+# Authors: AMD
+#
+# Makefile for DCN351.
+
+DCN351 = dcn351_init.o
+
+AMD_DAL_DCN351 = $(addprefix $(AMDDALPATH)/dc/dcn351/,$(DCN351))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCN351)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
new file mode 100644
index 000000000000..143d3fc0221c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hwseq.h"
+#include "dcn10/dcn10_hwseq.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn21/dcn21_hwseq.h"
+#include "dcn30/dcn30_hwseq.h"
+#include "dcn301/dcn301_hwseq.h"
+#include "dcn31/dcn31_hwseq.h"
+#include "dcn32/dcn32_hwseq.h"
+#include "dcn35/dcn35_hwseq.h"
+
+#include "dcn351_init.h"
+
+static const struct hw_sequencer_funcs dcn351_funcs = {
+ .program_gamut_remap = dcn30_program_gamut_remap,
+ .init_hw = dcn35_init_hw,
+ .power_down_on_boot = dcn35_power_down_on_boot,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dcn31_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dcn20_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn32_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn35_disable_plane,
+ .disable_pixel_data = dcn20_disable_pixel_data,
+ .pipe_control_lock = dcn20_pipe_control_lock,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
+ .cursor_lock = dcn10_cursor_lock,
+ .prepare_bandwidth = dcn35_prepare_bandwidth,
+ .optimize_bandwidth = dcn35_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn30_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dcn30_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_T12 = dce110_edp_wait_for_T12,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .enable_writeback = dcn30_enable_writeback,
+ .disable_writeback = dcn30_disable_writeback,
+ .update_writeback = dcn30_update_writeback,
+ .mmhubbub_warmup = dcn30_mmhubbub_warmup,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .program_dmdata_engine = dcn30_program_dmdata_engine,
+ .set_dmdata_attributes = dcn20_set_dmdata_attributes,
+ .init_sys_ctx = dcn31_init_sys_ctx,
+ .init_vm_ctx = dcn20_init_vm_ctx,
+ .set_flip_control_gsl = dcn20_set_flip_control_gsl,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
+ .power_down = dce110_power_down,
+ .set_backlight_level = dcn21_set_backlight_level,
+ .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
+ .set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dcn32_disable_link_output,
+ .z10_restore = dcn35_z10_restore,
+ .z10_save_init = dcn31_z10_save_init,
+ .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+ .optimize_pwr_state = dcn21_optimize_pwr_state,
+ .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+ .update_visual_confirm_color = dcn10_update_visual_confirm_color,
+ .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations,
+ .update_dsc_pg = dcn32_update_dsc_pg,
+ .calc_blocks_to_gate = dcn35_calc_blocks_to_gate,
+ .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
+ .hw_block_power_up = dcn35_hw_block_power_up,
+ .hw_block_power_down = dcn35_hw_block_power_down,
+ .root_clock_control = dcn35_root_clock_control,
+ .set_idle_state = dcn35_set_idle_state,
+ .get_idle_state = dcn35_get_idle_state
+};
+
+static const struct hwseq_private_funcs dcn351_private_funcs = {
+ .init_pipes = dcn35_init_pipes,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .update_mpcc = dcn20_update_mpcc,
+ .set_input_transfer_func = dcn32_set_input_transfer_func,
+ .set_output_transfer_func = dcn32_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn20_init_blank,
+ .disable_vga = NULL,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn35_plane_atomic_disable,
+ //.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/
+ //.hubp_pg_control = dcn35_hubp_pg_control,
+ .enable_power_gating_plane = dcn35_enable_power_gating_plane,
+ .dpp_root_clock_control = dcn35_dpp_root_clock_control,
+ .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ .update_odm = dcn35_update_odm,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_mcm_luts = dcn32_set_mcm_luts,
+ .setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
+ .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
+ .set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
+ .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .dsc_pg_control = dcn35_dsc_pg_control,
+ .dsc_pg_status = dcn32_dsc_pg_status,
+ .enable_plane = dcn35_enable_plane,
+};
+
+void dcn351_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn351_funcs;
+ dc->hwseq->funcs = dcn351_private_funcs;
+
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h
new file mode 100644
index 000000000000..970b01008b23
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN351_INIT_H__
+#define __DC_DCN351_INIT_H__
+
+struct dc;
+
+void dcn351_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN351_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 452680fe9aab..a54399383318 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -50,7 +50,7 @@ struct pg_block_update;
struct subvp_pipe_control_lock_fast_params {
struct dc *dc;
bool lock;
- struct pipe_ctx *pipe_ctx;
+ bool subvp_immediate_flip;
};
struct pipe_control_lock_params {
@@ -200,7 +200,7 @@ struct hw_sequencer_funcs {
struct dc_state *context);
enum dc_status (*apply_ctx_to_hw)(struct dc *dc,
struct dc_state *context);
- void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank);
void (*apply_ctx_for_surface)(struct dc *dc,
const struct dc_stream_state *stream,
@@ -248,6 +248,7 @@ struct hw_sequencer_funcs {
void (*enable_per_frame_crtc_position_reset)(struct dc *dc,
int group_size, struct pipe_ctx *grouped_pipes[]);
void (*enable_timing_synchronization)(struct dc *dc,
+ struct dc_state *state,
int group_index, int group_size,
struct pipe_ctx *grouped_pipes[]);
void (*enable_vblanks_synchronization)(struct dc *dc,
@@ -414,8 +415,10 @@ struct hw_sequencer_funcs {
struct pg_block_update *update_state);
void (*calc_blocks_to_ungate)(struct dc *dc, struct dc_state *context,
struct pg_block_update *update_state);
- void (*block_power_control)(struct dc *dc,
- struct pg_block_update *update_state, bool power_on);
+ void (*hw_block_power_up)(struct dc *dc,
+ struct pg_block_update *update_state);
+ void (*hw_block_power_down)(struct dc *dc,
+ struct pg_block_update *update_state);
void (*root_clock_control)(struct dc *dc,
struct pg_block_update *update_state, bool power_on);
void (*set_idle_state)(const struct dc *dc, bool allow_idle);
@@ -452,17 +455,18 @@ void get_mpctree_visual_confirm_color(
struct tg_color *color);
void get_subvp_visual_confirm_color(
- struct dc *dc,
- struct dc_state *context,
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
void get_mclk_switch_visual_confirm_color(
- struct dc *dc,
- struct dc_state *context,
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void set_p_state_switch_method(
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
+
void hwss_execute_sequence(struct dc *dc,
struct block_sequence block_sequence[],
int num_steps);
@@ -472,7 +476,8 @@ void hwss_build_fast_sequence(struct dc *dc,
unsigned int dmub_cmd_count,
struct block_sequence block_sequence[],
int *num_steps,
- struct pipe_ctx *pipe_ctx);
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_status *stream_status);
void hwss_send_dmcub_cmd(union block_sequence_params *params);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 82c592166875..6137cf09aa54 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -79,6 +79,7 @@ struct hwseq_private_funcs {
void (*update_plane_addr)(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
void (*plane_atomic_disconnect)(struct dc *dc,
+ struct dc_state *state,
struct pipe_ctx *pipe_ctx);
void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx);
bool (*set_input_transfer_func)(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 10397d4dfb07..f74ae0d41d3c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -200,11 +200,7 @@ struct resource_funcs {
unsigned int pipe_cnt,
unsigned int index);
- bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context, bool fast_update);
- void (*retain_phantom_pipes)(struct dc *dc, struct dc_state *context);
void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
- void (*save_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config);
- void (*restore_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config);
void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx);
};
@@ -385,6 +381,16 @@ union pipe_update_flags {
uint32_t raw;
};
+enum p_state_switch_method {
+ P_STATE_UNKNOWN = 0,
+ P_STATE_V_BLANK = 1,
+ P_STATE_FPO,
+ P_STATE_V_ACTIVE,
+ P_STATE_SUB_VP,
+ P_STATE_DRR_SUB_VP,
+ P_STATE_V_BLANK_SUB_VP
+};
+
struct pipe_ctx {
struct dc_plane_state *plane_state;
struct dc_stream_state *stream;
@@ -433,6 +439,7 @@ struct pipe_ctx {
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
union pipe_update_flags update_flags;
+ enum p_state_switch_method p_state_type;
struct tg_color visual_confirm_color;
bool has_vactive_margin;
/* subvp_index: only valid if the pipe is a SUBVP_MAIN*/
@@ -526,6 +533,14 @@ struct dc_state {
* @stream_status: Planes status on a given stream
*/
struct dc_stream_status stream_status[MAX_PIPES];
+ /**
+ * @phantom_streams: Stream state properties for phantoms
+ */
+ struct dc_stream_state *phantom_streams[MAX_PHANTOM_PIPES];
+ /**
+ * @phantom_planes: Planes state properties for phantoms
+ */
+ struct dc_plane_state *phantom_planes[MAX_PHANTOM_PIPES];
/**
* @stream_count: Total of streams in use
@@ -534,6 +549,14 @@ struct dc_state {
uint8_t stream_mask;
/**
+ * @phantom_stream_count: Total phantom streams in use
+ */
+ uint8_t phantom_stream_count;
+ /**
+ * @phantom_plane_count: Total phantom planes in use
+ */
+ uint8_t phantom_plane_count;
+ /**
* @res_ctx: Persistent state of resources
*/
struct resource_context res_ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index 33db15d69f23..3f0161d64675 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -36,7 +36,7 @@ struct abm {
};
struct abm_funcs {
- void (*abm_init)(struct abm *abm, uint32_t back_light);
+ void (*abm_init)(struct abm *abm, uint32_t back_light, uint32_t user_level);
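+ /* user_level is the last user-requested backlight, restored from panel_cntl stored_backlight_registers.USER_LEVEL at init */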
bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
bool (*set_abm_immediate_disable)(struct abm *abm, unsigned int panel_inst);
bool (*set_pipe)(struct abm *abm, unsigned int controller_id, unsigned int panel_inst);
@@ -64,7 +64,8 @@ struct abm_funcs {
bool (*set_pipe_ex)(struct abm *abm,
unsigned int otg_inst,
unsigned int option,
- unsigned int panel_inst);
+ unsigned int panel_inst,
+ unsigned int pwrseq_inst);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index fa9614bcb160..17e014d3bdc8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -62,6 +62,25 @@ struct dcn3_clk_internal {
uint32_t CLK4_CLK0_CURRENT_CNT; //fclk
};
+struct dcn35_clk_internal {
+ int dummy;
+ uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk
+ uint32_t CLK1_CLK1_CURRENT_CNT; //dppclk
+ uint32_t CLK1_CLK2_CURRENT_CNT; //dprefclk
+ uint32_t CLK1_CLK3_CURRENT_CNT; //dcfclk
+ uint32_t CLK1_CLK4_CURRENT_CNT; //dtbclk
+ //uint32_t CLK1_CLK5_CURRENT_CNT; //dpiaclk
+ //uint32_t CLK1_CLK6_CURRENT_CNT; //srdbgclk
+ uint32_t CLK1_CLK3_DS_CNTL; //dcf_deep_sleep_divider
+ uint32_t CLK1_CLK3_ALLOW_DS; //dcf_deep_sleep_allow
+
+ uint32_t CLK1_CLK0_BYPASS_CNTL; //dispclk bypass
+ uint32_t CLK1_CLK1_BYPASS_CNTL; //dppclk bypass
+ uint32_t CLK1_CLK2_BYPASS_CNTL; //dprefclk bypass
+ uint32_t CLK1_CLK3_BYPASS_CNTL; //dcfclk bypass
+ uint32_t CLK1_CLK4_BYPASS_CNTL; //dtbclk bypass
+};
+
struct dcn301_clk_internal {
int dummy;
uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk
@@ -314,6 +333,7 @@ struct clk_mgr {
bool force_smu_not_present;
bool dc_mode_softmax_enabled;
int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figure out where this goes
+ int dp_dto_source_clock_in_khz; // Used to program DP DTO with ss adjustment on DCN314
int dentist_vco_freq_khz;
struct clk_state_registers_and_bypass boot_snapshot;
struct clk_bw_params *bw_params;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index ce2f0c0e82bd..b9a06bf84cc9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -59,8 +59,8 @@ enum dentist_dispclk_change_mode {
struct dp_dto_params {
int otg_inst;
enum signal_type signal;
- long long pixclk_hz;
- long long refclk_hz;
+ uint64_t pixclk_hz;
+ uint64_t refclk_hz;
};
enum pixel_rate_div {
@@ -201,6 +201,10 @@ struct dccg_funcs {
struct dccg *dccg,
enum streamclk_source src,
uint32_t otg_inst);
+ void (*set_dto_dscclk)(
+ struct dccg *dccg,
+ uint32_t dsc_inst);
+ void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
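+ /* set_dto_dscclk switches a DSC instance to the DTO-generated clock;
+ * set_ref_dscclk falls back to the fixed refclk source. See the
+ * <48 MHz pixel clock workaround in link_set_dsc_on_stream() below.
+ */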
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index 86b711dcc785..729ca0064e94 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -188,6 +188,10 @@ struct dwbc_funcs {
bool (*is_enabled)(
struct dwbc *dwbc);
+ void (*set_fc_enable)(
+ struct dwbc *dwbc,
+ enum dwb_frame_capture_enable enable);
+
void (*set_stereo)(
struct dwbc *dwbc,
struct dwb_stereo_params *stereo_params);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index b95ae9596c3b..dcae23faeee3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -43,6 +43,7 @@
* to be used inside loops and for determining array sizes.
*/
#define MAX_PIPES 6
+#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
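+/* SubVP pairs each phantom pipe with a main pipe, so at most half of MAX_PIPES (3 here) can be phantoms */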
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
index 24af9d80b937..5dcbaa2db964 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
@@ -40,6 +40,7 @@ struct panel_cntl_backlight_registers {
unsigned int BL_PWM_PERIOD_CNTL;
unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
unsigned int PANEL_PWRSEQ_REF_DIV2;
+ unsigned int USER_LEVEL;
};
struct panel_cntl_funcs {
@@ -56,12 +57,14 @@ struct panel_cntl_funcs {
struct panel_cntl_init_data {
struct dc_context *ctx;
uint32_t inst;
+ uint32_t pwrseq_inst;
};
struct panel_cntl {
const struct panel_cntl_funcs *funcs;
struct dc_context *ctx;
uint32_t inst;
+ uint32_t pwrseq_inst;
/* registers setting needs to be saved and restored at InitBacklight */
struct panel_cntl_backlight_registers stored_backlight_registers;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index 7439865d1b50..26fe81f213da 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -289,6 +289,8 @@ struct link_service {
bool (*edp_replay_residency)(const struct dc_link *link,
unsigned int *residency, const bool is_start,
const bool is_alpm);
+ bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
+ const unsigned int *power_opts, uint16_t coasting_vtotal);
bool (*edp_wait_for_t12)(struct dc_link *link);
bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 0458d2d749f4..c958ef37b78a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -573,9 +573,6 @@ void update_audio_usage(
unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
-void get_audio_check(struct audio_info *aud_modes,
- struct audio_check *aud_chk);
-
bool get_temp_dp_link_res(struct dc_link *link,
struct link_resource *link_res,
struct dc_link_settings *link_settings);
@@ -622,5 +619,4 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream);
-
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index aa0a086aa7fc..3cbfbf8d107e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -776,10 +776,26 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
*/
void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
+ /* TODO: Move this to HWSS as this is a hardware programming sequence, not
+ * a link layer sequence.
+ */
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
+ struct dccg *dccg = dc->res_pool->dccg;
+ /* It has been found that when DSCCLK is lower than 16 MHz, DCN register
+ * access hangs. When DSCCLK is based on refclk, DSCCLK is always a fixed
+ * value higher than 16 MHz, so the issue doesn't occur. When DSCCLK is
+ * generated by the DTO, DSCCLK is based on 1/3 dispclk. For small timings
+ * with DSC such as 480p60Hz, the dispclk could be low enough to trigger
+ * this problem. We are implementing a workaround here to keep using dscclk
+ * based on the fixed refclk whenever the timing has less than a 3 x 16 MHz
+ * (i.e. 48 MHz) pixel clock, to avoid hitting this problem.
+ */
+ bool should_use_dto_dscclk = (dccg->funcs->set_dto_dscclk != NULL) &&
+ stream->timing.pix_clk_100hz > 480000;
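+ /* 480000 units of 100 Hz = 48 MHz, i.e. 3 x the 16 MHz DSCCLK floor */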
DC_LOGGER_INIT(dsc->ctx->logger);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
@@ -802,11 +818,15 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
+ if (should_use_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
+ if (should_use_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
}
dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
dsc_cfg.pic_width *= opp_cnt;
@@ -856,9 +876,14 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
}
/* disable DSC block */
+ if (dccg->funcs->set_ref_dscclk)
+ dccg->funcs->set_ref_dscclk(dccg, pipe_ctx->stream_res.dsc->inst);
pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ if (dccg->funcs->set_ref_dscclk)
+ dccg->funcs->set_ref_dscclk(dccg, odm_pipe->stream_res.dsc->inst);
odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
+ }
}
}
@@ -875,11 +900,15 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
struct dc_stream_state *stream = pipe_ctx->stream;
- DC_LOGGER_INIT(dsc->ctx->logger);
- if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
+ if (!pipe_ctx->stream->timing.flags.DSC)
return false;
+ if (!dsc)
+ return false;
+
+ DC_LOGGER_INIT(dsc->ctx->logger);
+
if (enable) {
struct dsc_config dsc_cfg;
uint8_t dsc_packed_pps[128];
@@ -1057,18 +1086,21 @@ static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
uint32_t denominator = 1;
/*
- * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+ * The 1.006 factor (margin 5300ppm + 300ppm ~ 0.6% as per spec) is not
+ * required when determining PBN/time slot utilization on the link between
+ * us and the branch, since that overhead is already accounted for in
+ * the get_pbn_per_slot function.
+ *
* The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
* common multiplier to render an integer PBN for all link rate/lane
* counts combinations
* calculate
- * peak_kbps *= (1006/1000)
* peak_kbps *= (64/54)
- * peak_kbps *= 8 convert to bytes
+ * peak_kbps /= (8 * 1000) convert to bytes
*/
- numerator = 64 * PEAK_FACTOR_X1000;
- denominator = 54 * 8 * 1000 * 1000;
+ numerator = 64;
+ denominator = 54 * 8 * 1000;
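+ /* e.g. a 2,673,000 kbps stream: 2,673,000 * 64 / 432,000 = 396 PBN */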
kbps *= numerator;
peak_kbps = dc_fixpt_from_fraction(kbps, denominator);
@@ -1977,17 +2009,11 @@ static enum dc_status enable_link_dp(struct dc_state *state,
}
}
- /*
- * If the link is DP-over-USB4 do the following:
- * - Train with fallback when enabling DPIA link. Conventional links are
+ /* Train with fallback when enabling DPIA link. Conventional links are
* trained with fallback during sink detection.
- * - Allocate only what the stream needs for bw in Gbps. Inform the CM
- * in case stream needs more or less bw from what has been allocated
- * earlier at plug time.
*/
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
do_fallback = true;
- }
/*
* Temporary w/a to get DP2.0 link rates to work with SST.
@@ -2169,6 +2195,32 @@ static enum dc_status enable_link(
return status;
}
+static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw)
+{
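+ /* currently a no-op that always reports success (placeholder for the actual bandwidth request) */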
+ return true;
+}
+
+static bool allocate_usb4_bandwidth(struct dc_stream_state *stream)
+{
+ bool ret;
+
+ int bw = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(stream->sink->link));
+
+ ret = allocate_usb4_bandwidth_for_stream(stream, bw);
+
+ return ret;
+}
+
+static bool deallocate_usb4_bandwidth(struct dc_stream_state *stream)
+{
+ bool ret;
+
+ ret = allocate_usb4_bandwidth_for_stream(stream, 0);
+
+ return ret;
+}
+
void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
@@ -2204,6 +2256,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
update_psp_stream_config(pipe_ctx, true);
dc->hwss.blank_stream(pipe_ctx);
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ deallocate_usb4_bandwidth(pipe_ctx->stream);
+
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
@@ -2446,6 +2501,9 @@ void link_set_dpms_on(
}
}
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ allocate_usb4_bandwidth(pipe_ctx->stream);
+
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
allocate_mst_payload(pipe_ctx);
else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 6b306ea58b9b..37d3027c32dc 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -216,6 +216,7 @@ static void construct_link_service_edp_panel_control(struct link_service *link_s
link_srv->edp_send_replay_cmd = edp_send_replay_cmd;
link_srv->edp_set_coasting_vtotal = edp_set_coasting_vtotal;
link_srv->edp_replay_residency = edp_replay_residency;
+ link_srv->edp_set_replay_power_opt_and_coasting_vtotal = edp_set_replay_power_opt_and_coasting_vtotal;
link_srv->edp_wait_for_t12 = edp_wait_for_t12;
link_srv->edp_is_ilr_optimization_required =
@@ -369,6 +370,30 @@ static enum transmitter translate_encoder_to_transmitter(
}
}
+static uint8_t translate_dig_inst_to_pwrseq_inst(struct dc_link *link)
+{
+ uint8_t pwrseq_inst = 0xF;
+ struct dc_context *dc_ctx = link->dc->ctx;
+
+ DC_LOGGER_INIT(dc_ctx->logger);
+
+ switch (link->eng_id) {
+ case ENGINE_ID_DIGA:
+ pwrseq_inst = 0;
+ break;
+ case ENGINE_ID_DIGB:
+ pwrseq_inst = 1;
+ break;
+ default:
+ DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", link->eng_id);
+ ASSERT(false);
+ break;
+ }
+
+ return pwrseq_inst;
+}
+
+
static void link_destruct(struct dc_link *link)
{
int i;
@@ -596,24 +621,6 @@ static bool construct_phy(struct dc_link *link,
link->ddc_hw_inst =
dal_ddc_get_line(get_ddc_pin(link->ddc));
-
- if (link->dc->res_pool->funcs->panel_cntl_create &&
- (link->link_id.id == CONNECTOR_ID_EDP ||
- link->link_id.id == CONNECTOR_ID_LVDS)) {
- panel_cntl_init_data.ctx = dc_ctx;
- panel_cntl_init_data.inst =
- panel_cntl_init_data.ctx->dc_edp_id_count;
- link->panel_cntl =
- link->dc->res_pool->funcs->panel_cntl_create(
- &panel_cntl_init_data);
- panel_cntl_init_data.ctx->dc_edp_id_count++;
-
- if (link->panel_cntl == NULL) {
- DC_ERROR("Failed to create link panel_cntl!\n");
- goto panel_cntl_create_fail;
- }
- }
-
enc_init_data.ctx = dc_ctx;
bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
&enc_init_data.encoder);
@@ -644,6 +651,23 @@ static bool construct_phy(struct dc_link *link,
link->dc->res_pool->dig_link_enc_count++;
link->link_enc_hw_inst = link->link_enc->transmitter;
+
+ if (link->dc->res_pool->funcs->panel_cntl_create &&
+ (link->link_id.id == CONNECTOR_ID_EDP ||
+ link->link_id.id == CONNECTOR_ID_LVDS)) {
+ panel_cntl_init_data.ctx = dc_ctx;
+ panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count;
+ panel_cntl_init_data.pwrseq_inst = translate_dig_inst_to_pwrseq_inst(link);
+ link->panel_cntl =
+ link->dc->res_pool->funcs->panel_cntl_create(
+ &panel_cntl_init_data);
+ panel_cntl_init_data.ctx->dc_edp_id_count++;
+
+ if (link->panel_cntl == NULL) {
+ DC_ERROR("Failed to create link panel_cntl!\n");
+ goto panel_cntl_create_fail;
+ }
+ }
for (i = 0; i < 4; i++) {
if (bp_funcs->get_device_tag(dc_ctx->dc_bios,
link->link_id, i,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index b45fda96eaf6..8fe66c367850 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -346,23 +346,61 @@ enum dc_status link_validate_mode_timing(
return DC_OK;
}
+/*
+ * This function calculates the bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective dpia link
+ *
+ * @stream: pointer to the dc_stream_state struct instance
+ * @num_streams: number of streams to be validated
+ *
+ * return: true if validation succeeded
+ */
bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
{
- bool ret = true;
- int bw_needed[MAX_DPIA_NUM];
- struct dc_link *link[MAX_DPIA_NUM];
-
- if (!num_streams || num_streams > MAX_DPIA_NUM)
- return ret;
+ int bw_needed[MAX_DPIA_NUM] = {0};
+ struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
+ int num_dpias = 0;
for (uint8_t i = 0; i < num_streams; ++i) {
+ if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ /* new dpia sst stream, check whether it exceeds max dpia */
+ if (num_dpias >= MAX_DPIA_NUM)
+ return false;
- link[i] = stream[i].link;
- bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
- dc_link_get_highest_encoding_format(link[i]));
+ dpia_link[num_dpias] = stream[i].link;
+ bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+ dc_link_get_highest_encoding_format(dpia_link[num_dpias]));
+ num_dpias++;
+ } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ uint8_t j = 0;
+ /* check whether it's a known dpia link */
+ for (; j < num_dpias; ++j) {
+ if (dpia_link[j] == stream[i].link)
+ break;
+ }
+
+ if (j == num_dpias) {
+ /* new dpia mst stream, check whether it exceeds max dpia */
+ if (num_dpias >= MAX_DPIA_NUM)
+ return false;
+ else {
+ dpia_link[j] = stream[i].link;
+ num_dpias++;
+ }
+ }
+
+ bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+ dc_link_get_highest_encoding_format(dpia_link[j]));
+ }
}
- ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
+ /* Include dp overheads */
+ for (uint8_t i = 0; i < num_dpias; ++i) {
+ int dp_overhead = 0;
+
+ dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]);
+ bw_needed[i] += dp_overhead;
+ }
- return ret;
+ return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
}
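
Worked through on a hypothetical three-stream topology, the aggregation above folds MST streams that share a tunnel into one bucket before the overhead pass (illustrative values; dpia A and B are assumed names):

/*
 * streams[0]: SST on dpia A -> dpia_link[0] = A, bw_needed[0]  = bw(s0)
 * streams[1]: MST on dpia B -> dpia_link[1] = B, bw_needed[1]  = bw(s1)
 * streams[2]: MST on dpia B -> matched at j = 1, bw_needed[1] += bw(s2)
 * overhead pass:             bw_needed[0] += overhead(A),
 *                            bw_needed[1] += overhead(B)
 * dpia_validate_usb4_bw(dpia_link, bw_needed, 2) then validates two
 * DPIAs, even though three streams were checked.
 */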
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index 4a954317d0da..595fb05946e9 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -25,6 +25,7 @@
#ifndef __LINK_VALIDATION_H__
#define __LINK_VALIDATION_H__
#include "link.h"
+
enum dc_status link_validate_mode_timing(
const struct dc_stream_state *stream,
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index db87aa7b5c90..289f5d133342 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -412,12 +412,18 @@ static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link)
{
enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN;
- if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20)
+ if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) {
cable_max_link_rate = LINK_RATE_UHBR20;
- else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY)
+ } else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) {
cable_max_link_rate = LINK_RATE_UHBR13_5;
- else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10)
- cable_max_link_rate = LINK_RATE_UHBR10;
+ } else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) {
+ // allow DP40 cables to do UHBR13.5 for passive or unknown cable type
+ if (link->dpcd_caps.cable_id.bits.CABLE_TYPE < 2) {
+ cable_max_link_rate = LINK_RATE_UHBR13_5;
+ } else {
+ cable_max_link_rate = LINK_RATE_UHBR10;
+ }
+ }
return cable_max_link_rate;
}
@@ -1392,7 +1398,7 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data);
cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
link->dc, link->link_enc->transmitter);
- if (dm_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+ if (dc_wake_and_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
cmd.cable_id.header.ret_status == 1) {
cable_id->raw = cmd.cable_id.data.output_raw;
DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 0bb749133909..6af42ba9885c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -82,24 +82,33 @@ bool dpia_query_hpd_status(struct dc_link *link)
{
union dmub_rb_cmd cmd = {0};
struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
- bool is_hpd_high = false;
/* prepare QUERY_HPD command */
cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
- /* Return HPD status reported by DMUB if query successfully executed. */
- if (dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
- is_hpd_high = cmd.query_hpd.data.result;
-
- DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
- __func__,
- link->link_index,
- link->link_id.enum_id - ENUM_ID_1,
- cmd.query_hpd.data.status,
- cmd.query_hpd.data.result);
-
- return is_hpd_high;
+ /* Query dpia hpd status from dmub */
+ if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd,
+ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+ cmd.query_hpd.data.status == AUX_RET_SUCCESS) {
+ DC_LOG_DEBUG("%s: for link(%d) dpia(%d) success, current_hpd_status(%d) new_hpd_status(%d)\n",
+ __func__,
+ link->link_index,
+ link->link_id.enum_id - ENUM_ID_1,
+ link->hpd_status,
+ cmd.query_hpd.data.result);
+ link->hpd_status = cmd.query_hpd.data.result;
+ } else {
+ DC_LOG_ERROR("%s: for link(%d) dpia(%d) failed with status(%d), current_hpd_status(%d) new_hpd_status(0)\n",
+ __func__,
+ link->link_index,
+ link->link_id.enum_id - ENUM_ID_1,
+ cmd.query_hpd.data.status,
+ link->hpd_status);
+ link->hpd_status = false;
+ }
+
+ return link->hpd_status;
}
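
Since the query now caches its result on the link, the returned value and the stored state cannot diverge. A hypothetical caller sketch (not part of this patch):

	bool hpd = dpia_query_hpd_status(link);

	/* the DMUB query above refreshed link->hpd_status as a side effect */
	ASSERT(hpd == link->hpd_status);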
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 7581023daa47..dd0d2b206462 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -50,15 +50,28 @@ static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
&& tmp->hpd_status
&& tmp->dpia_bw_alloc_config.bw_alloc_enabled);
}
+
static void reset_bw_alloc_struct(struct dc_link *link)
{
link->dpia_bw_alloc_config.bw_alloc_enabled = false;
- link->dpia_bw_alloc_config.sink_verified_bw = 0;
- link->dpia_bw_alloc_config.sink_max_bw = 0;
+ link->dpia_bw_alloc_config.link_verified_bw = 0;
+ link->dpia_bw_alloc_config.link_max_bw = 0;
+ link->dpia_bw_alloc_config.allocated_bw = 0;
link->dpia_bw_alloc_config.estimated_bw = 0;
link->dpia_bw_alloc_config.bw_granularity = 0;
+ link->dpia_bw_alloc_config.dp_overhead = 0;
link->dpia_bw_alloc_config.response_ready = false;
+ link->dpia_bw_alloc_config.nrd_max_lane_count = 0;
+ link->dpia_bw_alloc_config.nrd_max_link_rate = 0;
+ for (int i = 0; i < MAX_SINKS_PER_LINK; i++)
+ link->dpia_bw_alloc_config.remote_sink_req_bw[i] = 0;
+ DC_LOG_DEBUG("reset usb4 bw alloc of link(%d)\n", link->link_index);
}
+
+#define BW_GRANULARITY_0 4 // 0.25 Gbps
+#define BW_GRANULARITY_1 2 // 0.5 Gbps
+#define BW_GRANULARITY_2 1 // 1 Gbps
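+// e.g. a raw DPCD estimate of 40 units at granularity value 0 (0.25 Gbps
+// steps, divider 4) works out to 40 * (1,000,000 / 4) kbps = 10 Gbps,
+// assuming Kbps_TO_Gbps == 1,000,000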
+
static uint8_t get_bw_granularity(struct dc_link *link)
{
uint8_t bw_granularity = 0;
@@ -71,16 +84,20 @@ static uint8_t get_bw_granularity(struct dc_link *link)
switch (bw_granularity & 0x3) {
case 0:
- bw_granularity = 4;
+ bw_granularity = BW_GRANULARITY_0;
break;
case 1:
+ bw_granularity = BW_GRANULARITY_1;
+ break;
+ case 2:
default:
- bw_granularity = 2;
+ bw_granularity = BW_GRANULARITY_2;
break;
}
return bw_granularity;
}
+
static int get_estimated_bw(struct dc_link *link)
{
uint8_t bw_estimated_bw = 0;
@@ -93,31 +110,33 @@ static int get_estimated_bw(struct dc_link *link)
return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
}
-static bool allocate_usb4_bw(int *stream_allocated_bw, int bw_needed, struct dc_link *link)
+
+static int get_non_reduced_max_link_rate(struct dc_link *link)
{
- if (bw_needed > 0)
- *stream_allocated_bw += bw_needed;
+ uint8_t nrd_max_link_rate = 0;
- return true;
+ core_link_read_dpcd(
+ link,
+ DP_TUNNELING_MAX_LINK_RATE,
+ &nrd_max_link_rate,
+ sizeof(uint8_t));
+
+ return nrd_max_link_rate;
}
-static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, struct dc_link *link)
-{
- bool ret = false;
- if (*stream_allocated_bw > 0) {
- *stream_allocated_bw -= bw_to_dealloc;
- ret = true;
- } else {
- //Do nothing for now
- ret = true;
- }
+static int get_non_reduced_max_lane_count(struct dc_link *link)
+{
+ uint8_t nrd_max_lane_count = 0;
- // Unplug so reset values
- if (!link->hpd_status)
- reset_bw_alloc_struct(link);
+ core_link_read_dpcd(
+ link,
+ DP_TUNNELING_MAX_LANE_COUNT,
+ &nrd_max_lane_count,
+ sizeof(uint8_t));
- return ret;
+ return nrd_max_lane_count;
}
+
/*
* Read all New BW alloc configuration ex: estimated_bw, allocated_bw,
* granularity, Driver_ID, CM_Group, & populate the BW allocation structs
@@ -125,10 +144,22 @@ static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, stru
*/
static void init_usb4_bw_struct(struct dc_link *link)
{
- // Init the known values
+ reset_bw_alloc_struct(link);
+
+ /* init the known values */
link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+ link->dpia_bw_alloc_config.nrd_max_link_rate = get_non_reduced_max_link_rate(link);
+ link->dpia_bw_alloc_config.nrd_max_lane_count = get_non_reduced_max_lane_count(link);
+
+ DC_LOG_DEBUG("%s: bw_granularity(%d), estimated_bw(%d)\n",
+ __func__, link->dpia_bw_alloc_config.bw_granularity,
+ link->dpia_bw_alloc_config.estimated_bw);
+ DC_LOG_DEBUG("%s: nrd_max_link_rate(%d), nrd_max_lane_count(%d)\n",
+ __func__, link->dpia_bw_alloc_config.nrd_max_link_rate,
+ link->dpia_bw_alloc_config.nrd_max_lane_count);
}
+
static uint8_t get_lowest_dpia_index(struct dc_link *link)
{
const struct dc *dc_struct = link->dc;
@@ -141,51 +172,66 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;
- if (idx > dc_struct->links[i]->link_index)
+ if (idx > dc_struct->links[i]->link_index) {
idx = dc_struct->links[i]->link_index;
+ break;
+ }
}
return idx;
}
+
/*
- * Get the Max Available BW or Max Estimated BW for each Host Router
+ * Get the maximum DP tunnel bandwidth of the host router
*
- * @link: pointer to the dc_link struct instance
- * @type: ESTIMATD BW or MAX AVAILABLE BW
+ * @dc: pointer to the dc struct instance
+ * @hr_index: host router index
*
- * return: response_ready flag from dc_link struct
+ * return: host router maximum dp tunnel bandwidth
*/
-static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
+static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index)
{
- const struct dc *dc_struct = link->dc;
- uint8_t lowest_dpia_index = get_lowest_dpia_index(link);
- uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
- struct dc_link *link_temp;
+ uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]);
+ uint8_t hr_index_temp = 0;
+ struct dc_link *link_dpia_primary, *link_dpia_secondary;
int total_bw = 0;
- int i;
-
- for (i = 0; i < MAX_PIPES * 2; ++i) {
- if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
- continue;
+ for (uint8_t i = 0; i < MAX_PIPES * 2; ++i) {
- link_temp = dc_struct->links[i];
- if (!link_temp || !link_temp->hpd_status)
+ if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;
- idx_temp = (link_temp->link_index - lowest_dpia_index) / 2;
-
- if (idx_temp == idx) {
-
- if (type == HOST_ROUTER_BW_ESTIMATED)
- total_bw += link_temp->dpia_bw_alloc_config.estimated_bw;
- else if (type == HOST_ROUTER_BW_ALLOCATED)
- total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw;
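+		/* each host router fronts two DPIAs, hence the divide by 2 */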
+ hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2;
+
+ if (hr_index_temp == hr_index) {
+ link_dpia_primary = dc->links[i];
+ link_dpia_secondary = dc->links[i + 1];
+
+ /*
+ * If BW allocation is enabled on both DPIAs, then
+ * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary);
+ * otherwise HR BW = Estimated(bw alloc enabled dpia)
+ */
+ if ((link_dpia_primary->hpd_status &&
+ link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
+ (link_dpia_secondary->hpd_status &&
+ link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
+ total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
+ link_dpia_secondary->dpia_bw_alloc_config.allocated_bw;
+ } else if (link_dpia_primary->hpd_status &&
+ link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
+ total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
+ } else if (link_dpia_secondary->hpd_status &&
+ link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) {
+ total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw;
+ }
+ break;
}
}
return total_bw;
}
+
/*
* Cleanup function for when the dpia is unplugged to reset struct
* and perform any required clean up
@@ -194,42 +240,49 @@ static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
*
* return: none
*/
-static bool dpia_bw_alloc_unplug(struct dc_link *link)
+static void dpia_bw_alloc_unplug(struct dc_link *link)
{
- if (!link)
- return true;
-
- return deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
- link->dpia_bw_alloc_config.sink_allocated_bw, link);
+ if (link) {
+ DC_LOG_DEBUG("%s: resetting bw alloc config for link(%d)\n",
+ __func__, link->link_index);
+ reset_bw_alloc_struct(link);
+ }
}
+
static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
{
uint8_t requested_bw;
uint32_t temp;
- // 1. Add check for this corner case #1
- if (req_bw > link->dpia_bw_alloc_config.estimated_bw)
+ /* Error check whether requested bw is greater than estimated bw */
+ if (req_bw > link->dpia_bw_alloc_config.estimated_bw) {
+ DC_LOG_ERROR("%s: Request bw greater than estimated bw for link(%d)\n",
+ __func__, link->link_index);
req_bw = link->dpia_bw_alloc_config.estimated_bw;
+ }
temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
requested_bw = temp / Kbps_TO_Gbps;
- // Always make sure to add more to account for floating points
+ /* Always make sure to add more to account for floating points */
if (temp % Kbps_TO_Gbps)
++requested_bw;
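+	/* e.g. granularity 4 (0.25 Gbps units): a 1100000 Kbps request rounds up to 5 units */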
- // 2. Add check for this corner case #2
+ /* Error check whether requested bw equals the already-allocated bw */
req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw)
- return;
+ if (req_bw == link->dpia_bw_alloc_config.allocated_bw) {
+ DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
+ __func__, link->link_index);
+ }
- if (core_link_write_dpcd(
+ link->dpia_bw_alloc_config.response_ready = false; // Reset flag
+ core_link_write_dpcd(
link,
REQUESTED_BW,
&requested_bw,
- sizeof(uint8_t)) == DC_OK)
- link->dpia_bw_alloc_config.response_ready = false; // Reset flag
+ sizeof(uint8_t));
}
+
/*
* Return the response_ready flag from dc_link struct
*
@@ -241,6 +294,7 @@ static bool get_cm_response_ready_flag(struct dc_link *link)
{
return link->dpia_bw_alloc_config.response_ready;
}
+
// ------------------------------------------------------------------
// PUBLIC FUNCTIONS
// ------------------------------------------------------------------
@@ -277,27 +331,27 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
DPTX_BW_ALLOCATION_MODE_CONTROL,
&response,
sizeof(uint8_t)) != DC_OK) {
- DC_LOG_DEBUG("%s: **** FAILURE Enabling DPtx BW Allocation Mode Support ***\n",
- __func__);
+ DC_LOG_DEBUG("%s: FAILURE Enabling DPtx BW Allocation Mode Support for link(%d)\n",
+ __func__, link->link_index);
} else {
// SUCCESS Enabled DPtx BW Allocation Mode Support
- link->dpia_bw_alloc_config.bw_alloc_enabled = true;
- DC_LOG_DEBUG("%s: **** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n",
- __func__);
+ DC_LOG_DEBUG("%s: SUCCESS Enabling DPtx BW Allocation Mode Support for link(%d)\n",
+ __func__, link->link_index);
ret = true;
init_usb4_bw_struct(link);
+ link->dpia_bw_alloc_config.bw_alloc_enabled = true;
}
}
out:
return ret;
}
+
void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
{
int bw_needed = 0;
int estimated = 0;
- int host_router_total_estimated_bw = 0;
if (!get_bw_alloc_proceed_flag((link)))
return;
@@ -306,14 +360,22 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
case DPIA_BW_REQ_FAILED:
- DC_LOG_DEBUG("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__);
+ /*
+ * Ideally, we shouldn't run into this case as we always validate available
+ * bandwidth and request within that limit
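+ * The CM reports the new estimate in BW-allocation units, so convert
+ * it back to Kbps before storing it.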
+ */
+ estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+
+ DC_LOG_ERROR("%s: BW REQ FAILURE for DP-TX Request for link(%d)\n",
+ __func__, link->link_index);
+ DC_LOG_ERROR("%s: current estimated_bw(%d), new estimated_bw(%d)\n",
+ __func__, link->dpia_bw_alloc_config.estimated_bw, estimated);
- // Update the new Estimated BW value updated by CM
- link->dpia_bw_alloc_config.estimated_bw =
- bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+ /* Store the new estimated BW value reported by the CM */
+ link->dpia_bw_alloc_config.estimated_bw = estimated;
+ /* Fall back to requesting the new estimated bandwidth */
set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw);
- link->dpia_bw_alloc_config.response_ready = false;
/*
* If FAIL then it is either:
@@ -326,68 +388,34 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
case DPIA_BW_REQ_SUCCESS:
- DC_LOG_DEBUG("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__);
-
- // 1. SUCCESS 1st time before any Pruning is done
- // 2. SUCCESS after prev. FAIL before any Pruning is done
- // 3. SUCCESS after Pruning is done but before enabling link
-
bw_needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- // 1.
- if (!link->dpia_bw_alloc_config.sink_allocated_bw) {
-
- allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed, link);
- link->dpia_bw_alloc_config.sink_verified_bw =
- link->dpia_bw_alloc_config.sink_allocated_bw;
-
- // SUCCESS from first attempt
- if (link->dpia_bw_alloc_config.sink_allocated_bw >
- link->dpia_bw_alloc_config.sink_max_bw)
- link->dpia_bw_alloc_config.sink_verified_bw =
- link->dpia_bw_alloc_config.sink_max_bw;
- }
- // 3.
- else if (link->dpia_bw_alloc_config.sink_allocated_bw) {
-
- // Find out how much do we need to de-alloc
- if (link->dpia_bw_alloc_config.sink_allocated_bw > bw_needed)
- deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
- link->dpia_bw_alloc_config.sink_allocated_bw - bw_needed, link);
- else
- allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
- bw_needed - link->dpia_bw_alloc_config.sink_allocated_bw, link);
- }
+ DC_LOG_DEBUG("%s: BW REQ SUCCESS for DP-TX Request for link(%d)\n",
+ __func__, link->link_index);
+ DC_LOG_DEBUG("%s: current allocated_bw(%d), new allocated_bw(%d)\n",
+ __func__, link->dpia_bw_alloc_config.allocated_bw, bw_needed);
- // 4. If this is the 2nd sink then any unused bw will be reallocated to master DPIA
- // => check if estimated_bw changed
+ link->dpia_bw_alloc_config.allocated_bw = bw_needed;
link->dpia_bw_alloc_config.response_ready = true;
break;
case DPIA_EST_BW_CHANGED:
- DC_LOG_DEBUG("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__);
-
estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED);
- // 1. If due to unplug of other sink
- if (estimated == host_router_total_estimated_bw) {
- // First update the estimated & max_bw fields
- if (link->dpia_bw_alloc_config.estimated_bw < estimated)
- link->dpia_bw_alloc_config.estimated_bw = estimated;
- }
- // 2. If due to realloc bw btw 2 dpia due to plug OR realloc unused Bw
- else {
- // We lost estimated bw usually due to plug event of other dpia
- link->dpia_bw_alloc_config.estimated_bw = estimated;
- }
+ DC_LOG_DEBUG("%s: ESTIMATED BW CHANGED for link(%d)\n",
+ __func__, link->link_index);
+ DC_LOG_DEBUG("%s: current estimated_bw(%d), new estimated_bw(%d)\n",
+ __func__, link->dpia_bw_alloc_config.estimated_bw, estimated);
+
+ link->dpia_bw_alloc_config.estimated_bw = estimated;
break;
case DPIA_BW_ALLOC_CAPS_CHANGED:
- DC_LOG_DEBUG("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__);
+ DC_LOG_ERROR("%s: BW ALLOC CAPABILITY CHANGED to Disabled for link(%d)\n",
+ __func__, link->link_index);
link->dpia_bw_alloc_config.bw_alloc_enabled = false;
break;
}
@@ -405,21 +433,21 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
if (link->hpd_status && peak_bw > 0) {
// If DP over USB4 then we need to check BW allocation
- link->dpia_bw_alloc_config.sink_max_bw = peak_bw;
- set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
+ link->dpia_bw_alloc_config.link_max_bw = peak_bw;
+ set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.link_max_bw);
do {
- if (!(timeout > 0))
+ if (timeout > 0)
timeout--;
else
break;
- fsleep(10 * 1000);
+ msleep(10);
} while (!get_cm_response_ready_flag(link));
if (!timeout)
ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
- else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
- ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
+ else if (link->dpia_bw_alloc_config.allocated_bw > 0)
+ ret = link->dpia_bw_alloc_config.allocated_bw;
}
//2. Cold Unplug
else if (!link->hpd_status)
@@ -428,65 +456,102 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
out:
return ret;
}
-int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
+bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
{
- int ret = 0;
+ bool ret = false;
uint8_t timeout = 10;
+ DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
+ __func__, link->link_index, link->hpd_status,
+ link->dpia_bw_alloc_config.allocated_bw, req_bw);
+
if (!get_bw_alloc_proceed_flag(link))
goto out;
- /*
- * Sometimes stream uses same timing parameters as the already
- * allocated max sink bw so no need to re-alloc
- */
- if (req_bw != link->dpia_bw_alloc_config.sink_allocated_bw) {
- set_usb4_req_bw_req(link, req_bw);
- do {
- if (!(timeout > 0))
- timeout--;
- else
- break;
- udelay(10 * 1000);
- } while (!get_cm_response_ready_flag(link));
+ set_usb4_req_bw_req(link, req_bw);
+ do {
+ if (timeout > 0)
+ timeout--;
+ else
+ break;
+ msleep(10);
+ } while (!get_cm_response_ready_flag(link));
- if (!timeout)
- ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
- else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
- ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
- }
+ if (timeout)
+ ret = true;
out:
+ DC_LOG_DEBUG("%s: EXIT: timeout(%d), ret(%d)\n", __func__, timeout, ret);
return ret;
}
+
bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
{
bool ret = true;
- int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 };
- uint8_t lowest_dpia_index = 0, dpia_index = 0;
- uint8_t i;
+ int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0;
+ uint8_t lowest_dpia_index, i, hr_index;
if (!num_dpias || num_dpias > MAX_DPIA_NUM)
return ret;
- //Get total Host Router BW & Validate against each Host Router max BW
+ lowest_dpia_index = get_lowest_dpia_index(link[0]);
+
+ /* get total Host Router BW with granularity for the given modes */
for (i = 0; i < num_dpias; ++i) {
+ int granularity_Gbps = 0;
+ int bw_granularity = 0;
if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
continue;
- lowest_dpia_index = get_lowest_dpia_index(link[i]);
if (link[i]->link_index < lowest_dpia_index)
continue;
- dpia_index = (link[i]->link_index - lowest_dpia_index) / 2;
- bw_needed_per_hr[dpia_index] += bw_needed_per_dpia[i];
- if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) {
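+		/* round each DPIA's requested bw up to a whole number of BW units */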
+ granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity);
+ bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps +
+ ((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0);
- ret = false;
- break;
+ hr_index = (link[i]->link_index - lowest_dpia_index) / 2;
+ bw_needed_per_hr[hr_index] += bw_granularity;
+ }
+
+ /* validate against each Host Router max BW */
+ for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) {
+ if (bw_needed_per_hr[hr_index]) {
+ host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index);
+ if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) {
+ ret = false;
+ break;
+ }
}
}
return ret;
}
+
+int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link)
+{
+ int dp_overhead = 0, link_mst_overhead = 0;
+
+ if (!get_bw_alloc_proceed_flag((link)))
+ return dp_overhead;
+
+ /* if it is an MST link, add MTPH overhead */
+ if ((link->type == dc_connection_mst_branch) &&
+ !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ /* For 8b/10b encoding: an MTP is 64 time slots long and slot 0 is
+ * used for the MTPH, so MST overhead is 1/64 of the link bandwidth
+ * (excluding any other overhead)
+ */
+ const struct dc_link_settings *link_cap =
+ dc_link_get_link_cap(link);
+ uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
+ (uint32_t)link_cap->lane_count *
+ LINK_RATE_REF_FREQ_IN_KHZ * 8;
+ link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0);
+ }
+
+ /* add all the overheads */
+ dp_overhead = link_mst_overhead;
+
+ return dp_overhead;
+}
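For reference, the MTPH overhead math above works out as follows; a
standalone sketch, not part of the patch, assuming HBR2 x4 inputs and
that LINK_RATE_REF_FREQ_IN_KHZ is the 27 MHz value dc uses:

#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000 /* 27 MHz link-rate reference */

int main(void)
{
	/* assumed inputs: HBR2 (20 x 0.27 Gbps units) on 4 lanes */
	unsigned int link_rate = 20, lane_count = 4;
	unsigned int link_bw_in_kbps = link_rate * lane_count *
				       LINK_RATE_REF_FREQ_IN_KHZ * 8;
	/* slot 0 of the 64-slot MTP carries the MTPH -> 1/64 overhead */
	unsigned int overhead = link_bw_in_kbps / 64 +
				(link_bw_in_kbps % 64 ? 1 : 0);

	/* prints: link bw 17280000 kbps, MTPH overhead 270000 kbps */
	printf("link bw %u kbps, MTPH overhead %u kbps\n",
	       link_bw_in_kbps, overhead);
	return 0;
}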
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 7292690383ae..3b6d8494f9d5 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -59,9 +59,9 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
* @link: pointer to the dc_link struct instance
* @req_bw: Bw requested by the stream
*
- * return: allocated bw else return 0
+ * return: true if allocated successfully
*/
-int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
+bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
/*
* Handle the USB4 BW Allocation related functionality here:
@@ -99,4 +99,13 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
*/
bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
+/*
+ * Obtain all the DP overheads in DP tunneling for the DPIA link
+ *
+ * @link: pointer to the dc_link struct instance
+ *
+ * return: DP overheads in DP tunneling
+ */
+int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
+
#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
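The Kbps <-> DPCD-unit conversions in link_dp_dpia_bw.c above all follow
one pattern; a minimal round-trip sketch, assuming Kbps_TO_Gbps is
1000000 and a DPCD granularity field of 4 (0.25 Gbps units):

#include <stdio.h>

#define KBPS_TO_GBPS 1000000 /* assumed value of the driver's Kbps_TO_Gbps */

int main(void)
{
	unsigned int bw_granularity = 4; /* 0.25 Gbps per DPCD unit */
	unsigned int req_bw = 1100000;   /* 1.1 Gbps stream need, in Kbps */

	/* Kbps -> DPCD units, rounding up as set_usb4_req_bw_req() does */
	unsigned int temp = req_bw * bw_granularity;
	unsigned int units = temp / KBPS_TO_GBPS + (temp % KBPS_TO_GBPS ? 1 : 0);

	/* DPCD units -> Kbps, as get_estimated_bw() decodes the field */
	unsigned int granted = units * (KBPS_TO_GBPS / bw_granularity);

	/* prints: 1100000 Kbps -> 5 units -> 1250000 Kbps */
	printf("%u Kbps -> %u units -> %u Kbps\n", req_bw, units, granted);
	return 0;
}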
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 0c00e94e90b1..ba69874be5a4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -190,9 +190,6 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
/*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/
union psr_error_status replay_error_status;
- if (link->replay_settings.config.force_disable_desync_error_check)
- return;
-
if (!link->replay_settings.replay_feature_enabled)
return;
@@ -210,9 +207,6 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
&replay_error_status.raw,
sizeof(replay_error_status.raw));
- if (replay_configuration.bits.DESYNC_ERROR_STATUS)
- link->replay_settings.config.received_desync_error_hpd = 1;
-
link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR =
replay_error_status.bits.LINK_CRC_ERROR;
link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR =
@@ -225,6 +219,12 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR) {
bool allow_active;
+ if (link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR)
+ link->replay_settings.config.received_desync_error_hpd = 1;
+
+ if (link->replay_settings.config.force_disable_desync_error_check)
+ return;
+
/* Acknowledge and clear configuration bits */
dm_helpers_dp_write_dpcd(
link->ctx,
@@ -265,7 +265,7 @@ void dp_handle_link_loss(struct dc_link *link)
for (i = count - 1; i >= 0; i--) {
// Always use max settings here for DP 1.4a LL Compliance CTS
- if (link->is_automated) {
+ if (link->skip_fallback_on_link_loss) {
pipes[i]->link_config.dp_link_settings.lane_count =
link->verified_link_cap.lane_count;
pipes[i]->link_config.dp_link_settings.link_rate =
@@ -404,7 +404,9 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
// Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC
- link->is_automated = true;
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ link->skip_fallback_on_link_loss = true;
+
device_service_clear.bits.AUTOMATED_TEST = 1;
core_link_write_dpcd(
link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 90339c2dfd84..5a0b04518956 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -807,7 +807,7 @@ void dp_decide_lane_settings(
const struct link_training_settings *lt_settings,
const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+ union dpcd_training_lane *dpcd_lane_settings)
{
uint32_t lane;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
index 7d027bac8255..851bd17317a0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
@@ -111,7 +111,7 @@ void dp_decide_lane_settings(
const struct link_training_settings *lt_settings,
const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+ union dpcd_training_lane *dpcd_lane_settings);
enum dc_dp_training_pattern decide_cr_training_pattern(
const struct dc_link_settings *link_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index 4f4e899e5c46..e8dda44b23cb 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -811,7 +811,7 @@ static enum link_training_result dpia_training_eq_transparent(
/* Take into consideration corner case for DP 1.4a LL Compliance CTS as USB4
* has to share encoders unlike DP and USBC
*/
- if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->is_automated && retries_eq)) {
+ if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->skip_fallback_on_link_loss && retries_eq)) {
result = LINK_TRAINING_SUCCESS;
break;
}
@@ -1037,7 +1037,7 @@ enum link_training_result dpia_perform_link_training(
*/
if (result == LINK_TRAINING_SUCCESS) {
fsleep(5000);
- if (!link->is_automated)
+ if (!link->skip_fallback_on_link_loss)
result = dp_check_link_loss_status(link, &lt_settings);
} else if (result == LINK_TRAINING_ABORT)
dpia_training_abort(link, &lt_settings, repeater_id);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index ac0fa88b52a0..046d3e205415 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -287,8 +287,8 @@ bool set_default_brightness_aux(struct dc_link *link)
if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
if (!read_default_bl_aux(link, &default_backlight))
default_backlight = 150000;
- // if > 5000, it might be wrong readback
- if (default_backlight > 5000000)
+ // backlight is in millinits: < 1 nit (1000) or > 5000 nits (5000000) is likely a bad readback
+ if (default_backlight < 1000 || default_backlight > 5000000)
default_backlight = 150000;
return edp_set_backlight_level_nits(link, true,
@@ -529,6 +529,9 @@ bool edp_set_backlight_level(const struct dc_link *link,
if (dc_is_embedded_signal(link->connector_signal)) {
struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
+ if (link->panel_cntl)
+ link->panel_cntl->stored_backlight_registers.USER_LEVEL = backlight_pwm_u16_16;
+
if (pipe_ctx) {
/* Disable brightness ramping when the display is blanked
* as it can hang the DMCU
@@ -927,8 +930,8 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state)
bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
{
/* To-do: Setup Replay */
- struct dc *dc = link->ctx->dc;
- struct dmub_replay *replay = dc->res_pool->replay;
+ struct dc *dc;
+ struct dmub_replay *replay;
int i;
unsigned int panel_inst;
struct replay_context replay_context = { 0 };
@@ -944,6 +947,10 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
if (!link)
return false;
+ dc = link->ctx->dc;
+
+ replay = dc->res_pool->replay;
+
if (!replay)
return false;
@@ -972,8 +979,7 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
replay_context.line_time_in_ns = lineTimeInNs;
- if (replay)
- link->replay_settings.replay_feature_enabled =
+ link->replay_settings.replay_feature_enabled =
replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
if (link->replay_settings.replay_feature_enabled) {
@@ -1065,6 +1071,33 @@ bool edp_replay_residency(const struct dc_link *link,
return true;
}
+bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
+ const unsigned int *power_opts, uint16_t coasting_vtotal)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_replay *replay = dc->res_pool->replay;
+ unsigned int panel_inst;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ /* Only when both the power opt and coasting vtotal change can this function return true */
+ if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts &&
+ coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) {
+ if (link->replay_settings.replay_feature_enabled &&
+ replay->funcs->replay_set_power_opt_and_coasting_vtotal) {
+ replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay,
+ *power_opts, panel_inst, coasting_vtotal);
+ link->replay_settings.replay_power_opt_active = *power_opts;
+ link->replay_settings.coasting_vtotal = coasting_vtotal;
+ } else
+ return false;
+ } else
+ return false;
+
+ return true;
+}
+
static struct abm *get_abm_from_stream_res(const struct dc_link *link)
{
int i;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index b7493ff4fcee..34e521af7bb4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -63,6 +63,8 @@ bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
bool edp_replay_residency(const struct dc_link *link,
unsigned int *residency, const bool is_start, const bool is_alpm);
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
+bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
+ const unsigned int *power_opts, uint16_t coasting_vtotal);
bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index a2c4db2cebdd..823493543325 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -166,6 +166,16 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
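+	/* 0xf selects the unused/"no" source, detaching every OPP segment before the OTG is disabled (assumed encoding) */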
+ REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
+ OPTC_SEG0_SRC_SEL, 0xf,
+ OPTC_SEG1_SRC_SEL, 0xf,
+ OPTC_SEG2_SRC_SEL, 0xf,
+ OPTC_SEG3_SRC_SEL, 0xf,
+ OPTC_NUM_OF_INPUT_SEGMENT, 0);
+
+ REG_UPDATE(OPTC_MEMORY_CONFIG,
+ OPTC_MEM_SEL, 0);
+
/* disable otg request until end of the first line
* in the vertical blank region
*/
@@ -198,6 +208,13 @@ static void optc32_disable_phantom_otg(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
+ OPTC_SEG0_SRC_SEL, 0xf,
+ OPTC_SEG1_SRC_SEL, 0xf,
+ OPTC_SEG2_SRC_SEL, 0xf,
+ OPTC_SEG3_SRC_SEL, 0xf,
+ OPTC_NUM_OF_INPUT_SEGMENT, 0);
+
REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
}
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index a4a39f1638cf..5b1547508850 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -138,6 +138,16 @@ static bool optc35_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
+ OPTC_SEG0_SRC_SEL, 0xf,
+ OPTC_SEG1_SRC_SEL, 0xf,
+ OPTC_SEG2_SRC_SEL, 0xf,
+ OPTC_SEG3_SRC_SEL, 0xf,
+ OPTC_NUM_OF_INPUT_SEGMENT, 0);
+
+ REG_UPDATE(OPTC_MEMORY_CONFIG,
+ OPTC_MEM_SEL, 0);
+
/* disable otg request until end of the first line
* in the vertical blank region
*/
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index af1b31f4e69a..d08d10969251 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -1250,7 +1250,10 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
/* Store first available for MST second display
* in daisy chain use case
*/
- j = i;
+
+ if (pool->stream_enc[i]->id != ENGINE_ID_VIRTUAL)
+ j = i;
+
if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id ==
link->link_enc->preferred_engine)
return pool->stream_enc[i];
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 2b6dcb489b90..37a64186f324 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -1682,6 +1682,7 @@ noinline bool dcn30_internal_validate_bw(
* We don't actually support prefetch mode 2, so require that we
* at least support prefetch mode 1.
*/
+ context->bw_ctx.dml.validate_max_state = fast_validate;
context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
dm_allow_self_refresh;
@@ -1691,6 +1692,7 @@ noinline bool dcn30_internal_validate_bw(
memset(merge, 0, sizeof(merge));
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
}
+ context->bw_ctx.dml.validate_max_state = false;
}
dml_log_mode_support_params(&context->bw_ctx.dml);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index f6cbcc9b4006..c4d71e7f18af 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -89,6 +89,8 @@
#include "dcn20/dcn20_vmid.h"
#include "dml/dcn32/dcn32_fpu.h"
+#include "dc_state_priv.h"
+
#include "dml2/dml2_wrapper.h"
#define DC_LOGGER_INIT(logger)
@@ -1644,7 +1646,7 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state)
phantom_plane = prev_phantom_plane;
else
- phantom_plane = dc_create_plane_state(dc);
+ phantom_plane = dc_state_create_phantom_plane(dc, context, curr_pipe->plane_state);
memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
@@ -1665,9 +1667,7 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
phantom_plane->clip_rect.y = 0;
phantom_plane->clip_rect.height = phantom_stream->src.height;
- phantom_plane->is_phantom = true;
-
- dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
+ dc_state_add_phantom_plane(dc, phantom_stream, phantom_plane, context);
curr_pipe = curr_pipe->bottom_pipe;
prev_phantom_plane = phantom_plane;
@@ -1683,13 +1683,7 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,
struct dc_stream_state *phantom_stream = NULL;
struct pipe_ctx *ref_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx];
- phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink);
- phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
- phantom_stream->dpms_off = true;
- phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
- phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
- ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
- ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
+ phantom_stream = dc_state_create_phantom_stream(dc, context, ref_pipe->stream);
/* stream has limited viewport and small timing */
memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
@@ -1699,81 +1693,10 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,
dcn32_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream, pipes, pipe_cnt, dc_pipe_idx);
DC_FP_END();
- dc_add_stream_to_ctx(dc, context, phantom_stream);
+ dc_state_add_phantom_stream(dc, context, phantom_stream, ref_pipe->stream);
return phantom_stream;
}
-void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context)
-{
- int i;
- struct dc_plane_state *phantom_plane = NULL;
- struct dc_stream_state *phantom_stream = NULL;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (resource_is_pipe_type(pipe, OTG_MASTER) &&
- resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- phantom_plane = pipe->plane_state;
- phantom_stream = pipe->stream;
-
- dc_plane_state_retain(phantom_plane);
- dc_stream_retain(phantom_stream);
- }
- }
-}
-
-// return true if removed piped from ctx, false otherwise
-bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context, bool fast_update)
-{
- int i;
- bool removed_pipe = false;
- struct dc_plane_state *phantom_plane = NULL;
- struct dc_stream_state *phantom_stream = NULL;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- // build scaling params for phantom pipes
- if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- phantom_plane = pipe->plane_state;
- phantom_stream = pipe->stream;
-
- dc_rem_all_planes_for_stream(dc, pipe->stream, context);
- dc_remove_stream_from_ctx(dc, context, pipe->stream);
-
- /* Ref count is incremented on allocation and also when added to the context.
- * Therefore we must call release for the the phantom plane and stream once
- * they are removed from the ctx to finally decrement the refcount to 0 to free.
- */
- dc_plane_state_release(phantom_plane);
- dc_stream_release(phantom_stream);
-
- removed_pipe = true;
- }
-
- /* For non-full updates, a shallow copy of the current state
- * is created. In this case we don't want to erase the current
- * state (there can be 2 HIRQL threads, one in flip, and one in
- * checkMPO) that can cause a race condition.
- *
- * This is just a workaround, needs a proper fix.
- */
- if (!fast_update) {
- // Clear all phantom stream info
- if (pipe->stream) {
- pipe->stream->mall_stream_config.type = SUBVP_NONE;
- pipe->stream->mall_stream_config.paired_stream = NULL;
- }
-
- if (pipe->plane_state) {
- pipe->plane_state->is_phantom = false;
- }
- }
- }
- return removed_pipe;
-}
-
/* TODO: Input to this function should indicate which pipe indexes (or streams)
* require a phantom pipe / stream
*/
@@ -1798,7 +1721,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context,
// We determine which phantom pipes were added by comparing with
// the phantom stream.
if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
pipe->stream->use_dynamic_meta = false;
pipe->plane_state->flip_immediate = false;
if (!resource_build_scaling_params(pipe)) {
@@ -1817,7 +1740,6 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
int vlevel = 0;
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
- struct mall_temp_config mall_temp_config;
/* To handle Freesync properly, setting FreeSync DML parameters
* to its default state for the first stage of validation
@@ -1827,29 +1749,12 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
DC_LOGGER_INIT(dc->ctx->logger);
- /* For fast validation, there are situations where a shallow copy of
- * of the dc->current_state is created for the validation. In this case
- * we want to save and restore the mall config because we always
- * teardown subvp at the beginning of validation (and don't attempt
- * to add it back if it's fast validation). If we don't restore the
- * subvp config in cases of fast validation + shallow copy of the
- * dc->current_state, the dc->current_state will have a partially
- * removed subvp state when we did not intend to remove it.
- */
- if (fast_validate) {
- memset(&mall_temp_config, 0, sizeof(mall_temp_config));
- dcn32_save_mall_state(dc, context, &mall_temp_config);
- }
-
BW_VAL_TRACE_COUNT();
DC_FP_START();
out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
DC_FP_END();
- if (fast_validate)
- dcn32_restore_mall_state(dc, context, &mall_temp_config);
-
if (pipe_cnt == 0)
goto validate_out;
@@ -1933,7 +1838,7 @@ int dcn32_populate_dml_pipes_from_context(
* This is just a workaround -- needs a proper fix.
*/
if (!fast_validate) {
- switch (pipe->stream->mall_stream_config.type) {
+ switch (dc_state_get_pipe_subvp_type(context, pipe)) {
case SUBVP_MAIN:
pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport;
subvp_in_use = true;
@@ -2037,10 +1942,6 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
- .remove_phantom_pipes = dcn32_remove_phantom_pipes,
- .retain_phantom_pipes = dcn32_retain_phantom_pipes,
- .save_mall_state = dcn32_save_mall_state,
- .restore_mall_state = dcn32_restore_mall_state,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
};
@@ -2454,16 +2355,19 @@ static bool dcn32_resource_construct(
dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
dc->dml2_options.svp_pstate.callbacks.dc = dc;
- dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context;
- dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx;
+ dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
- dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state;
- dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context;
- dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx;
- dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink;
- dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release;
- dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release;
+ dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
+ dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
+ dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;
+ dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
+ dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
+ dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index b2f20e6cfb38..0c87b0fabba7 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -91,12 +91,6 @@ bool dcn32_release_post_bldn_3dlut(
struct dc_3dlut **lut,
struct dc_transfer_func **shaper);
-bool dcn32_remove_phantom_pipes(struct dc *dc,
- struct dc_state *context, bool fast_update);
-
-void dcn32_retain_phantom_pipes(struct dc *dc,
- struct dc_state *context);
-
void dcn32_add_phantom_pipes(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -169,15 +163,7 @@ void dcn32_determine_det_override(struct dc *dc,
void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes);
-void dcn32_save_mall_state(struct dc *dc,
- struct dc_state *context,
- struct mall_temp_config *temp_config);
-
-void dcn32_restore_mall_state(struct dc *dc,
- struct dc_state *context,
- struct mall_temp_config *temp_config);
-
-struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context);
+struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context);
bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe);
@@ -193,6 +179,8 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context);
bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel);
+void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 12986fe0b289..74412e5f03fe 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -92,6 +92,8 @@
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
+#include "dc_state_priv.h"
+
#define DC_LOGGER_INIT(logger)
enum dcn321_clk_src_array_id {
@@ -1605,10 +1607,6 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
- .remove_phantom_pipes = dcn32_remove_phantom_pipes,
- .retain_phantom_pipes = dcn32_retain_phantom_pipes,
- .save_mall_state = dcn32_save_mall_state,
- .restore_mall_state = dcn32_restore_mall_state,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
};
@@ -2008,16 +2006,19 @@ static bool dcn321_resource_construct(
dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
dc->dml2_options.svp_pstate.callbacks.dc = dc;
- dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context;
- dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx;
+ dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
- dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state;
- dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context;
- dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx;
- dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink;
- dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release;
- dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release;
+ dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
+ dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
+ dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;
+ dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
+ dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
+ dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 1a0ed1c7e2d4..761ec9891875 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -96,12 +96,15 @@
#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
+#include "dce/dmub_replay.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "dml/dcn31/display_mode_vba_31.h" /*temp*/
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
+#include "dc_state_priv.h"
+
#include "link_enc_cfg.h"
#define DC_LOGGER_INIT(logger)
@@ -717,6 +720,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.disable_dpp_power_gate = true,
.disable_hubp_power_gate = true,
+ .disable_optc_power_gate = true, /* should match the two flags above */
+ .disable_hpo_power_gate = true, /* dmub fw forces domain25 on */
.disable_clock_gate = false,
.disable_dsc_power_gate = true,
.vsr_support = true,
@@ -736,7 +741,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.i2c = true,
.dmcu = false, // This is previously known to cause hang on S3 cycles if enabled
.dscl = true,
- .cm = false,
+ .cm = true,
.mpc = true,
.optc = true,
.vpg = true,
@@ -764,7 +769,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_hpo_pg_support = false,
.enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = false,
- .disable_idle_power_optimizations = true,
+ .disable_idle_power_optimizations = false,
.dmcub_emulation = false,
.disable_boot_optimizations = false,
.disable_unbounded_requesting = false,
@@ -776,13 +781,15 @@ static const struct dc_debug_options debug_defaults_drv = {
.ignore_pg = true,
.psp_disabled_wa = true,
.ips2_eval_delay_us = 200,
- .ips2_entry_delay_us = 400
+ .ips2_entry_delay_us = 400,
+ .static_screen_wait_frames = 2,
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
@@ -1541,6 +1548,9 @@ static void dcn35_resource_destruct(struct dcn35_resource_pool *pool)
if (pool->base.psr != NULL)
dmub_psr_destroy(&pool->base.psr);
+ if (pool->base.replay != NULL)
+ dmub_replay_destroy(&pool->base.replay);
+
if (pool->base.pg_cntl != NULL)
dcn_pg_cntl_destroy(&pool->base.pg_cntl);
@@ -2025,6 +2035,14 @@ static bool dcn35_resource_construct(
goto create_fail;
}
+ /* Replay */
+ pool->base.replay = dmub_replay_create(ctx);
+ if (pool->base.replay == NULL) {
+ dm_error("DC: failed to create replay obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
/* ABM */
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.multiple_abms[i] = dmub_abm_create(ctx,
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index df63aa8f01e9..c78c9224ab60 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -86,6 +86,7 @@ enum dmub_status {
DMUB_STATUS_TIMEOUT,
DMUB_STATUS_INVALID,
DMUB_STATUS_HW_FAILURE,
+ DMUB_STATUS_POWER_STATE_D3
};
/* enum dmub_asic - dmub asic identifier */
@@ -150,6 +151,13 @@ enum dmub_memory_access_type {
DMUB_MEMORY_ACCESS_DMA
};
+/* enum dmub_srv_power_state_type - to track DC power state in dmub_srv */
+enum dmub_srv_power_state_type {
+ DMUB_POWER_STATE_UNDEFINED = 0,
+ DMUB_POWER_STATE_D0 = 1,
+ DMUB_POWER_STATE_D3 = 8
+};
+
/**
* struct dmub_region - dmub hw memory region
* @base: base address for region, must be 256 byte aligned
@@ -485,6 +493,8 @@ struct dmub_srv {
/* Feature capabilities reported by fw */
struct dmub_feature_caps feature_caps;
struct dmub_visual_confirm_color visual_confirm_color;
+
+ enum dmub_srv_power_state_type power_state;
};
/**
@@ -889,6 +899,18 @@ enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub);
*/
void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
+/**
+ * dmub_srv_set_power_state() - Track DC power state in dmub_srv
+ * @dmub: The dmub service
+ * @power_state: DC power state setting
+ *
+ * Store DC power state in dmub_srv. If dmub_srv is in D3, then don't send messages to DMUB
+ *
+ * Return:
+ * void
+ */
+void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state);
+
#if defined(__cplusplus)
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index a365f6c096de..c64b6c848ef7 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -653,7 +653,7 @@ union dmub_fw_boot_options {
uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/
uint32_t usb4_cm_version: 1; /**< 1 CM support */
uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
- uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */
+ uint32_t reserved0: 1;
uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/
uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */
uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/
@@ -3498,6 +3498,16 @@ struct dmub_cmd_abm_set_pipe_data {
* TODO: Remove.
*/
uint8_t ramping_boundary;
+
+ /**
+ * PwrSeq HW Instance.
+ */
+ uint8_t pwrseq_inst;
+
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[3];
};
/**
@@ -3878,7 +3888,7 @@ enum dmub_cmd_panel_cntl_type {
* struct dmub_cmd_panel_cntl_data - Panel control data.
*/
struct dmub_cmd_panel_cntl_data {
- uint32_t inst; /**< panel instance */
+ uint32_t pwrseq_inst; /**< pwrseq instance */
uint32_t current_backlight; /* in/out */
uint32_t bl_pwm_cntl; /* in/out */
uint32_t bl_pwm_period_cntl; /* in/out */
@@ -3937,7 +3947,7 @@ struct dmub_cmd_lvtma_control_data {
uint8_t uc_pwr_action; /**< LVTMA_ACTION */
uint8_t bypass_panel_control_wait;
uint8_t reserved_0[2]; /**< For future use */
- uint8_t panel_inst; /**< LVTMA control instance */
+ uint8_t pwrseq_inst; /**< LVTMA control instance */
uint8_t reserved_1[3]; /**< For future use */
};
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 38360adc53d9..9ad738805320 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -64,7 +64,7 @@
/* Default scratch mem size. */
-#define DMUB_SCRATCH_MEM_SIZE (256)
+#define DMUB_SCRATCH_MEM_SIZE (1024)
/* Number of windows in use. */
#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
@@ -713,6 +713,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->hw_funcs.reset_release(dmub);
dmub->hw_init = true;
+ dmub->power_state = DMUB_POWER_STATE_D0;
return DMUB_STATUS_OK;
}
@@ -766,6 +767,9 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
+ if (dmub->power_state != DMUB_POWER_STATE_D0)
+ return DMUB_STATUS_POWER_STATE_D3;
+
if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
return DMUB_STATUS_HW_FAILURE;
@@ -784,6 +788,9 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
+ if (dmub->power_state != DMUB_POWER_STATE_D0)
+ return DMUB_STATUS_POWER_STATE_D3;
+
/**
* Read back all the queued commands to ensure that they've
* been flushed to framebuffer memory. Otherwise DMCUB might
@@ -1100,3 +1107,11 @@ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_
subvp_index);
}
}
+
+void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
+{
+ if (!dmub || !dmub->hw_init)
+ return;
+
+ dmub->power_state = dmub_srv_power_state;
+}
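The new gate is meant to be driven from the DM layer around
suspend/resume; a minimal sketch of the expected call pattern (the
example_* wrappers are illustrative, not actual hooks):

/* Illustrative only: gating DMUB traffic across suspend/resume. */
static void example_display_suspend(struct dmub_srv *dmub)
{
	/* from here on, dmub_srv_cmd_queue()/dmub_srv_cmd_execute()
	 * return DMUB_STATUS_POWER_STATE_D3 instead of touching HW
	 */
	dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
}

static void example_display_resume(struct dmub_srv *dmub)
{
	dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
}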
diff --git a/drivers/gpu/drm/amd/display/include/audio_types.h b/drivers/gpu/drm/amd/display/include/audio_types.h
index 66a54da0641c..915a031a43cb 100644
--- a/drivers/gpu/drm/amd/display/include/audio_types.h
+++ b/drivers/gpu/drm/amd/display/include/audio_types.h
@@ -64,7 +64,7 @@ enum audio_dto_source {
/* PLL information required for AZALIA DTO calculation */
struct audio_pll_info {
- uint32_t dp_dto_source_clock_in_khz;
+ uint32_t audio_dto_source_clock_in_khz;
uint32_t feed_back_divider;
enum audio_dto_source dto_source;
bool ss_enabled;
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index bc96d0211360..813463ffe15c 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -417,6 +417,8 @@ struct integrated_info {
/* V2.1 */
struct edp_info edp1_info;
struct edp_info edp2_info;
+ uint32_t gpuclk_ss_percentage;
+ uint32_t gpuclk_ss_type;
};
/*
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 47296d155c3a..3955b7e4b2e2 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -81,6 +81,7 @@ fail_alloc_context:
void mod_freesync_destroy(struct mod_freesync *mod_freesync)
{
struct core_freesync *core_freesync = NULL;
+
if (mod_freesync == NULL)
return;
core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
@@ -278,9 +279,8 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
}
} else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
/* Enter Below the Range */
- if (!in_out_vrr->btr.btr_active) {
+ if (!in_out_vrr->btr.btr_active)
in_out_vrr->btr.btr_active = true;
- }
}
/* BTR set to "not active" so disengage */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 1ddb4f5eac8e..182e7532dda8 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -63,6 +63,7 @@ static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
+
if (is_dp_hdcp(hdcp)) {
status = (hdcp->auth.msg.hdcp1.bstatus &
DP_BSTATUS_R0_PRIME_READY) ?
@@ -131,9 +132,8 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* Avoid device count == 0 to do authentication */
- if (0 == get_device_count(hdcp)) {
+ if (get_device_count(hdcp) == 0)
return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
- }
/* Some MST display may choose to report the internal panel as an HDCP RX.
* To update this condition with 1(because the immediate repeater's internal
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 91c22b96ebde..733f22bed021 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -208,9 +208,8 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* Avoid device count == 0 to do authentication */
- if (0 == get_device_count(hdcp)) {
+ if (get_device_count(hdcp) == 0)
return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
- }
/* Some MST display may choose to report the internal panel as an HDCP RX. */
/* To update this condition with 1(because the immediate repeater's internal */
@@ -689,9 +688,8 @@ static enum mod_hdcp_status validate_stream_ready(struct mod_hdcp *hdcp,
if (is_hdmi_dvi_sl_hdcp(hdcp)) {
if (!process_rxstatus(hdcp, event_ctx, input, &status))
goto out;
- if (event_ctx->rx_id_list_ready) {
+ if (event_ctx->rx_id_list_ready)
goto out;
- }
}
if (is_hdmi_dvi_sl_hdcp(hdcp))
if (!mod_hdcp_execute_and_set(check_stream_ready_available,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index c62df3bcc7cb..1d83c1b9da10 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -86,10 +86,12 @@
#define HDCP_CPIRQ_TRACE(hdcp) \
HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index)
#define HDCP_EVENT_TRACE(hdcp, event) \
- if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \
- HDCP_TIMEOUT_TRACE(hdcp); \
- else if (event == MOD_HDCP_EVENT_CPIRQ) \
- HDCP_CPIRQ_TRACE(hdcp)
+ do { \
+ if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \
+ HDCP_TIMEOUT_TRACE(hdcp); \
+ else if (event == MOD_HDCP_EVENT_CPIRQ) \
+ HDCP_CPIRQ_TRACE(hdcp); \
+ } while (0)
/* TODO: find some way to tell if logging is off to save time */
#define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \
mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
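
The do { ... } while (0) wrapper added to HDCP_EVENT_TRACE above is the standard guard for multi-statement macros. A minimal sketch of the hazard it prevents (all function names here are illustrative, not driver API): because the old expansion ended in an else-less "else if", a caller's own else could silently bind to the macro's internal branch instead of the caller's if.

static void trace_timeout(void) { }
static void trace_cpirq(void) { }
static void drop_link(void) { }

#define EVENT_TRACE_UNGUARDED(ev)	\
	if ((ev) == 1)			\
		trace_timeout();	\
	else if ((ev) == 2)		\
		trace_cpirq()

static void caller(int ev, int link_ok)
{
	if (link_ok)
		EVENT_TRACE_UNGUARDED(ev);
	else
		drop_link();	/* binds to the macro's trailing
				 * "else if ((ev) == 2)", not to
				 * "if (link_ok)": drop_link() now
				 * never runs when !link_ok */
}

With the do { ... } while (0) form, the same call site pairs the else with if (link_ok) as intended.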
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index ee67a35c2a8e..8c137d7c032e 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -443,7 +443,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
- continue;
+ continue;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -926,7 +926,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
- continue;
+ continue;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
index 5b71bc96b98c..7844ea91650b 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -98,9 +98,9 @@ enum ta_dtm_encoder_type {
* This enum defines software value for dio_output_type
*/
typedef enum {
- TA_DTM_DIO_OUTPUT_TYPE__INVALID,
- TA_DTM_DIO_OUTPUT_TYPE__DIRECT,
- TA_DTM_DIO_OUTPUT_TYPE__DPIA
+ TA_DTM_DIO_OUTPUT_TYPE__INVALID,
+ TA_DTM_DIO_OUTPUT_TYPE__DIRECT,
+ TA_DTM_DIO_OUTPUT_TYPE__DPIA
} ta_dtm_dio_output_type;
struct ta_dtm_topology_update_input_v3 {
@@ -237,11 +237,11 @@ enum ta_hdcp2_hdcp2_msg_id_max_size {
#define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127
#define TA_HDCP__HDCP1_V_PRIME_SIZE 20
#define TA_HDCP__HDCP2_TX_BUF_MAX_SIZE \
- TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6
+ (TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6)
// 64 bits boundaries
#define TA_HDCP__HDCP2_RX_BUF_MAX_SIZE \
- TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4
+ (TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4)
enum ta_hdcp_status {
TA_HDCP_STATUS__SUCCESS = 0x00,
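
Wrapping the summed sizes in parentheses matters whenever these macros participate in a wider expression; the same hazard motivates the parentheses added to the bswap16 helper in power_helpers.c later in this series. A tiny self-contained illustration:

#define RX_BUF_SIZE_BAD		100 + 28 + 6	/* unparenthesized */
#define RX_BUF_SIZE_GOOD	(100 + 28 + 6)

static int half_bad  = RX_BUF_SIZE_BAD / 2;	/* 100 + 28 + (6 / 2) = 131 */
static int half_good = RX_BUF_SIZE_GOOD / 2;	/* 134 / 2            =  67 */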
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index afe1f6cce528..cc3dc9b589f6 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -23,34 +23,6 @@
*
*/
-
-
-
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
#ifndef MOD_FREESYNC_H_
#define MOD_FREESYNC_H_
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 84f9b412a4f1..738ee763f24a 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -147,12 +147,15 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */
- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- vsc_packet_revision = vsc_packet_rev4;
- else if (stream->link->replay_settings.config.replay_supported)
+ if (stream->link->psr_settings.psr_feature_enabled) {
+ if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ vsc_packet_revision = vsc_packet_rev4;
+ else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ vsc_packet_revision = vsc_packet_rev2;
+ }
+
+ if (stream->link->replay_settings.config.replay_supported)
vsc_packet_revision = vsc_packet_rev4;
- else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
- vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support */
if (stream->use_vsc_sdp_for_colorimetry)
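
The reordering in the hunk above makes two behaviors explicit: PSR-based revisions now apply only once psr_feature_enabled is set, and panel replay unconditionally forces revision 4, taking precedence over any PSR choice. A hedged sketch of the resulting selection (the constants are placeholders, not the dc enum values):

#include <stdbool.h>

enum { PSR_VERSION_1 = 1, PSR_VERSION_SU_1 = 2 };	/* stand-ins only */
enum vsc_rev { VSC_REV_NONE = 0, VSC_REV_2 = 2, VSC_REV_4 = 4 };

static enum vsc_rev pick_vsc_revision(bool psr_feature_enabled,
				      int psr_version,
				      bool replay_supported)
{
	enum vsc_rev rev = VSC_REV_NONE;

	/* PSR-derived revisions apply only once the feature is enabled */
	if (psr_feature_enabled) {
		if (psr_version == PSR_VERSION_SU_1)
			rev = VSC_REV_4;
		else if (psr_version == PSR_VERSION_1)
			rev = VSC_REV_2;
	}

	/* panel replay always uses revision 4 and takes precedence */
	if (replay_supported)
		rev = VSC_REV_4;

	return rev;
}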
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index a522a7c02911..ad98e504c00d 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -31,7 +31,7 @@
#define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
#define bswap16_based_on_endian(big_endian, value) \
- (big_endian) ? cpu_to_be16(value) : cpu_to_le16(value)
+ ((big_endian) ? cpu_to_be16(value) : cpu_to_le16(value))
/* Possible Min Reduction config from least aggressive to most aggressive
* 0 1 2 3 4 5 6 7 8 9 10 11 12
@@ -839,6 +839,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
((dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x08) ||
(dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x07)))
isPSRSUSupported = false;
+ else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
+ isPSRSUSupported = false;
else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
isPSRSUSupported = true;
}
@@ -971,6 +973,34 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
return true;
}
+void set_replay_coasting_vtotal(struct dc_link *link,
+ enum replay_coasting_vtotal_type type,
+ uint16_t vtotal)
+{
+ link->replay_settings.coasting_vtotal_table[type] = vtotal;
+}
+
+void calculate_replay_link_off_frame_count(struct dc_link *link,
+ uint16_t vtotal, uint16_t htotal)
+{
+ uint8_t max_link_off_frame_count = 0;
+ uint16_t max_deviation_line = 0, pixel_deviation_per_line = 0;
+
+ max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line;
+ pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line;
+
+ if (htotal != 0 && vtotal != 0)
+ max_link_off_frame_count = htotal * max_deviation_line / (pixel_deviation_per_line * vtotal);
+ else
+ ASSERT(0);
+
+ link->replay_settings.link_off_frame_count_level =
+ max_link_off_frame_count >= PR_LINK_OFF_FRAME_COUNT_BEST ? PR_LINK_OFF_FRAME_COUNT_BEST :
+ max_link_off_frame_count >= PR_LINK_OFF_FRAME_COUNT_GOOD ? PR_LINK_OFF_FRAME_COUNT_GOOD :
+ PR_LINK_OFF_FRAME_COUNT_FAIL;
+
+}
+
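
A worked instance of the formula in calculate_replay_link_off_frame_count() above, with illustrative timing numbers (not taken from any real panel):

#include <stdint.h>

static uint8_t example_max_link_off_frame_count(void)
{
	uint16_t htotal = 2200, vtotal = 1125;	/* illustrative timings */
	uint16_t max_deviation_line = 100;	/* from DPCD pr_info */
	uint16_t pixel_deviation_per_line = 2;	/* from DPCD pr_info */

	/* 2200 * 100 / (2 * 1125) = 220000 / 2250 = 97 (integer divide) */
	return htotal * max_deviation_line /
	       (pixel_deviation_per_line * vtotal);
}

The result of 97 is then bucketed by the nested conditional: BEST is checked before GOOD, with FAIL as the fallback.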
bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps)
{
unsigned int data_points_size;
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index d9e0d67d67f7..c17bbc6fb38c 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -54,6 +54,11 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
unsigned int inst);
void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
+void set_replay_coasting_vtotal(struct dc_link *link,
+ enum replay_coasting_vtotal_type type,
+ uint16_t vtotal);
+void calculate_replay_link_off_frame_count(struct dc_link *link,
+ uint16_t vtotal, uint16_t htotal);
bool is_psr_su_specific_panel(struct dc_link *link);
void mod_power_calc_psr_configs(struct psr_config *psr_config,
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 7f98394338c2..1dc5dd9b7bf7 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -244,7 +244,6 @@ enum DC_FEATURE_MASK {
DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
- DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
};
enum DC_DEBUG_MASK {
@@ -255,8 +254,10 @@ enum DC_DEBUG_MASK {
DC_DISABLE_PSR = 0x10,
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
DC_DISABLE_MPO = 0x40,
- DC_DISABLE_REPLAY = 0x50,
DC_ENABLE_DPIA_TRACE = 0x80,
+ DC_ENABLE_DML2 = 0x100,
+ DC_DISABLE_PSR_SU = 0x200,
+ DC_DISABLE_REPLAY = 0x400,
};
enum amd_dpm_forced_level;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
index 7ee3d291120d..6f80bfa7e41a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
@@ -8707,10 +8707,10 @@
#define regBIF_BX1_MM_CFGREGS_CNTL_BASE_IDX 2
#define regBIF_BX1_BX_RESET_CNTL 0x00f0
#define regBIF_BX1_BX_RESET_CNTL_BASE_IDX 2
-#define regBIF_BX1_INTERRUPT_CNTL 0x8e11
-#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX 5
-#define regBIF_BX1_INTERRUPT_CNTL2 0x8e12
-#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX 5
+#define regBIF_BX1_INTERRUPT_CNTL 0x00f1
+#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX 2
+#define regBIF_BX1_INTERRUPT_CNTL2 0x00f2
+#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX 2
#define regBIF_BX1_CLKREQB_PAD_CNTL 0x00f8
#define regBIF_BX1_CLKREQB_PAD_CNTL_BASE_IDX 2
#define regBIF_BX1_BIF_FEATURES_CONTROL_MISC 0x00fb
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h
new file mode 100644
index 000000000000..a4dd372c0541
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_10_0_2_OFFSET_HEADER
+#define _smuio_10_0_2_OFFSET_HEADER
+
+// addressBlock: smuio_smuio_misc_SmuSmuioDec
+// base address: 0x5a000
+#define mmSMUIO_MCM_CONFIG 0x0023
+#define mmSMUIO_MCM_CONFIG_BASE_IDX 0
+#define mmIP_DISCOVERY_VERSION 0x0000
+#define mmIP_DISCOVERY_VERSION_BASE_IDX 1
+#define mmIO_SMUIO_PINSTRAP 0x01b1
+#define mmIO_SMUIO_PINSTRAP_BASE_IDX 1
+#define mmSCRATCH_REGISTER0 0x01b2
+#define mmSCRATCH_REGISTER0_BASE_IDX 1
+#define mmSCRATCH_REGISTER1 0x01b3
+#define mmSCRATCH_REGISTER1_BASE_IDX 1
+#define mmSCRATCH_REGISTER2 0x01b4
+#define mmSCRATCH_REGISTER2_BASE_IDX 1
+#define mmSCRATCH_REGISTER3 0x01b5
+#define mmSCRATCH_REGISTER3_BASE_IDX 1
+#define mmSCRATCH_REGISTER4 0x01b6
+#define mmSCRATCH_REGISTER4_BASE_IDX 1
+#define mmSCRATCH_REGISTER5 0x01b7
+#define mmSCRATCH_REGISTER5_BASE_IDX 1
+#define mmSCRATCH_REGISTER6 0x01b8
+#define mmSCRATCH_REGISTER6_BASE_IDX 1
+#define mmSCRATCH_REGISTER7 0x01b9
+#define mmSCRATCH_REGISTER7_BASE_IDX 1
+
+
+// addressBlock: smuio_smuio_reset_SmuSmuioDec
+// base address: 0x5a300
+#define mmSMUIO_MP_RESET_INTR 0x00c1
+#define mmSMUIO_MP_RESET_INTR_BASE_IDX 0
+#define mmSMUIO_SOC_HALT 0x00c2
+#define mmSMUIO_SOC_HALT_BASE_IDX 0
+#define mmSMUIO_GFX_MISC_CNTL 0x00c8
+#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
+
+
+// addressBlock: smuio_smuio_ccxctrl_SmuSmuioDec
+// base address: 0x5a000
+#define mmPWROK_REFCLK_GAP_CYCLES 0x0001
+#define mmPWROK_REFCLK_GAP_CYCLES_BASE_IDX 1
+#define mmGOLDEN_TSC_INCREMENT_UPPER 0x0004
+#define mmGOLDEN_TSC_INCREMENT_UPPER_BASE_IDX 1
+#define mmGOLDEN_TSC_INCREMENT_LOWER 0x0005
+#define mmGOLDEN_TSC_INCREMENT_LOWER_BASE_IDX 1
+#define mmGOLDEN_TSC_COUNT_UPPER 0x0025
+#define mmGOLDEN_TSC_COUNT_UPPER_BASE_IDX 1
+#define mmGOLDEN_TSC_COUNT_LOWER 0x0026
+#define mmGOLDEN_TSC_COUNT_LOWER_BASE_IDX 1
+#define mmGFX_GOLDEN_TSC_SHADOW_UPPER 0x0029
+#define mmGFX_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1
+#define mmGFX_GOLDEN_TSC_SHADOW_LOWER 0x002a
+#define mmGFX_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1
+#define mmSOC_GOLDEN_TSC_SHADOW_UPPER 0x002b
+#define mmSOC_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1
+#define mmSOC_GOLDEN_TSC_SHADOW_LOWER 0x002c
+#define mmSOC_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1
+#define mmSOC_GAP_PWROK 0x002d
+#define mmSOC_GAP_PWROK_BASE_IDX 1
+
+// addressBlock: smuio_smuio_swtimer_SmuSmuioDec
+// base address: 0x5ac40
+#define mmPWR_VIRT_RESET_REQ 0x0110
+#define mmPWR_VIRT_RESET_REQ_BASE_IDX 1
+#define mmPWR_DISP_TIMER_CONTROL 0x0111
+#define mmPWR_DISP_TIMER_CONTROL_BASE_IDX 1
+#define mmPWR_DISP_TIMER2_CONTROL 0x0113
+#define mmPWR_DISP_TIMER2_CONTROL_BASE_IDX 1
+#define mmPWR_DISP_TIMER_GLOBAL_CONTROL 0x0115
+#define mmPWR_DISP_TIMER_GLOBAL_CONTROL_BASE_IDX 1
+#define mmPWR_IH_CONTROL 0x0116
+#define mmPWR_IH_CONTROL_BASE_IDX 1
+
+// addressBlock: smuio_smuio_svi0_SmuSmuioDec
+// base address: 0x6f000
+#define mmSMUSVI0_TEL_PLANE0 0x520e
+#define mmSMUSVI0_TEL_PLANE0_BASE_IDX 1
+#define mmSMUSVI0_PLANE0_CURRENTVID 0x5217
+#define mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX 1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h
new file mode 100644
index 000000000000..d10ae61c346b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_10_0_2_SH_MASK_HEADER
+#define _smuio_10_0_2_SH_MASK_HEADER
+
+// addressBlock: smuio_smuio_misc_SmuSmuioDec
+//SMUIO_MCM_CONFIG
+#define SMUIO_MCM_CONFIG__DIE_ID__SHIFT 0x0
+#define SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT 0x2
+#define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT 0x5
+#define SMUIO_MCM_CONFIG__PKG_SUBTYPE__SHIFT 0x6
+#define SMUIO_MCM_CONFIG__CONSOLE_K__SHIFT 0x10
+#define SMUIO_MCM_CONFIG__CONSOLE_A__SHIFT 0x11
+#define SMUIO_MCM_CONFIG__DIE_ID_MASK 0x00000003L
+#define SMUIO_MCM_CONFIG__PKG_TYPE_MASK 0x0000001CL
+#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK 0x00000020L
+#define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK 0x000000C0L
+#define SMUIO_MCM_CONFIG__CONSOLE_K_MASK 0x00010000L
+#define SMUIO_MCM_CONFIG__CONSOLE_A_MASK 0x00020000L
+//IP_DISCOVERY_VERSION
+#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION__SHIFT 0x0
+#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION_MASK 0xFFFFFFFFL
+//IO_SMUIO_PINSTRAP
+#define IO_SMUIO_PINSTRAP__AUD_PORT_CONN__SHIFT 0x0
+#define IO_SMUIO_PINSTRAP__AUD__SHIFT 0x3
+#define IO_SMUIO_PINSTRAP__AUD_PORT_CONN_MASK 0x00000007L
+#define IO_SMUIO_PINSTRAP__AUD_MASK 0x00000018L
+//SCRATCH_REGISTER0
+#define SCRATCH_REGISTER0__ScratchPad0__SHIFT 0x0
+#define SCRATCH_REGISTER0__ScratchPad0_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER1
+#define SCRATCH_REGISTER1__ScratchPad1__SHIFT 0x0
+#define SCRATCH_REGISTER1__ScratchPad1_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER2
+#define SCRATCH_REGISTER2__ScratchPad2__SHIFT 0x0
+#define SCRATCH_REGISTER2__ScratchPad2_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER3
+#define SCRATCH_REGISTER3__ScratchPad3__SHIFT 0x0
+#define SCRATCH_REGISTER3__ScratchPad3_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER4
+#define SCRATCH_REGISTER4__ScratchPad4__SHIFT 0x0
+#define SCRATCH_REGISTER4__ScratchPad4_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER5
+#define SCRATCH_REGISTER5__ScratchPad5__SHIFT 0x0
+#define SCRATCH_REGISTER5__ScratchPad5_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER6
+#define SCRATCH_REGISTER6__ScratchPad6__SHIFT 0x0
+#define SCRATCH_REGISTER6__ScratchPad6_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER7
+#define SCRATCH_REGISTER7__ScratchPad7__SHIFT 0x0
+#define SCRATCH_REGISTER7__ScratchPad7_MASK 0xFFFFFFFFL
+
+// addressBlock: smuio_smuio_reset_SmuSmuioDec
+//SMUIO_MP_RESET_INTR
+#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0
+#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L
+//SMUIO_SOC_HALT
+#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN__SHIFT 0x2
+#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN__SHIFT 0x3
+#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN_MASK 0x00000004L
+#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN_MASK 0x00000008L
+//SMUIO_GFX_MISC_CNTL
+#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
+#define SMUIO_GFX_MISC_CNTL__PWR_GFX_DLDO_CLK_SWITCH__SHIFT 0x3
+#define SMUIO_GFX_MISC_CNTL__PWR_GFX_RLC_CGPG_EN__SHIFT 0x4
+#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFX_DLDO_CLK_SWITCH_MASK 0x00000008L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFX_RLC_CGPG_EN_MASK 0x00000010L
+
+// addressBlock: smuio_smuio_ccxctrl_SmuSmuioDec
+//PWROK_REFCLK_GAP_CYCLES
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles__SHIFT 0x0
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles__SHIFT 0x8
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles_MASK 0x000000FFL
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles_MASK 0x0000FF00L
+//GOLDEN_TSC_INCREMENT_UPPER
+#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper__SHIFT 0x0
+#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper_MASK 0x00FFFFFFL
+//GOLDEN_TSC_INCREMENT_LOWER
+#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower__SHIFT 0x0
+#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower_MASK 0xFFFFFFFFL
+//GOLDEN_TSC_COUNT_UPPER
+#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper__SHIFT 0x0
+#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK 0x00FFFFFFL
+//GOLDEN_TSC_COUNT_LOWER
+#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower__SHIFT 0x0
+#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower_MASK 0xFFFFFFFFL
+//GFX_GOLDEN_TSC_SHADOW_UPPER
+#define GFX_GOLDEN_TSC_SHADOW_UPPER__GfxGoldenTscShadowUpper__SHIFT 0x0
+#define GFX_GOLDEN_TSC_SHADOW_UPPER__GfxGoldenTscShadowUpper_MASK 0x00FFFFFFL
+//GFX_GOLDEN_TSC_SHADOW_LOWER
+#define GFX_GOLDEN_TSC_SHADOW_LOWER__GfxGoldenTscShadowLower__SHIFT 0x0
+#define GFX_GOLDEN_TSC_SHADOW_LOWER__GfxGoldenTscShadowLower_MASK 0xFFFFFFFFL
+//SOC_GOLDEN_TSC_SHADOW_UPPER
+#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper__SHIFT 0x0
+#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper_MASK 0x00FFFFFFL
+//SOC_GOLDEN_TSC_SHADOW_LOWER
+#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower__SHIFT 0x0
+#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower_MASK 0xFFFFFFFFL
+//SOC_GAP_PWROK
+#define SOC_GAP_PWROK__soc_gap_pwrok__SHIFT 0x0
+#define SOC_GAP_PWROK__soc_gap_pwrok_MASK 0x00000001L
+
+// addressBlock: smuio_smuio_swtimer_SmuSmuioDec
+//PWR_VIRT_RESET_REQ
+#define PWR_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0
+#define PWR_VIRT_RESET_REQ__PF_FLR__SHIFT 0x1f
+#define PWR_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL
+#define PWR_VIRT_RESET_REQ__PF_FLR_MASK 0x80000000L
+//PWR_DISP_TIMER_CONTROL
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L
+//PWR_DISP_TIMER2_CONTROL
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L
+//PWR_DISP_TIMER_GLOBAL_CONTROL
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN__SHIFT 0xa
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH_MASK 0x000003FFL
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN_MASK 0x00000400L
+//PWR_IH_CONTROL
+#define PWR_IH_CONTROL__MAX_CREDIT__SHIFT 0x0
+#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK__SHIFT 0x5
+#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK__SHIFT 0x6
+#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN__SHIFT 0x1f
+#define PWR_IH_CONTROL__MAX_CREDIT_MASK 0x0000001FL
+#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK_MASK 0x00000020L
+#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK_MASK 0x00000040L
+#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN_MASK 0x80000000L
+
+// addressBlock: smuio_smuio_svi0_SmuSmuioDec
+//SMUSVI0_TEL_PLANE0
+#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_IDDCOR__SHIFT 0x0
+#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT 0x10
+#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_IDDCOR_MASK 0x000000FFL
+#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK 0x01FF0000L
+//SMUSVI0_PLANE0_CURRENTVID
+#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT 0x18
+#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK 0xFF000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 8ebba87f4289..edcb85560ced 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -318,6 +318,7 @@ enum pp_xgmi_plpd_mode {
#define MAX_GFX_CLKS 8
#define MAX_CLKS 4
#define NUM_VCN 4
+#define NUM_JPEG_ENG 32
struct seq_file;
enum amd_pp_clock_type;
@@ -775,6 +776,85 @@ struct gpu_metrics_v1_4 {
uint16_t padding;
};
+struct gpu_metrics_v1_5 {
+ struct metrics_table_header common_header;
+
+ /* Temperature (Celsius) */
+ uint16_t temperature_hotspot;
+ uint16_t temperature_mem;
+ uint16_t temperature_vrsoc;
+
+ /* Power (Watts) */
+ uint16_t curr_socket_power;
+
+ /* Utilization (%) */
+ uint16_t average_gfx_activity;
+ uint16_t average_umc_activity; // memory controller
+ uint16_t vcn_activity[NUM_VCN];
+ uint16_t jpeg_activity[NUM_JPEG_ENG];
+
+ /* Energy (15.259uJ (2^-16) units) */
+ uint64_t energy_accumulator;
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Throttle status */
+ uint32_t throttle_status;
+
+ /* Clock Lock Status. Each bit corresponds to clock instance */
+ uint32_t gfxclk_lock_status;
+
+ /* Link width (number of lanes) and speed (in 0.1 GT/s) */
+ uint16_t pcie_link_width;
+ uint16_t pcie_link_speed;
+
+ /* XGMI bus width and bitrate (in Gbps) */
+ uint16_t xgmi_link_width;
+ uint16_t xgmi_link_speed;
+
+ /* Utilization Accumulated (%) */
+ uint32_t gfx_activity_acc;
+ uint32_t mem_activity_acc;
+
+ /* PCIE accumulated bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_acc;
+
+ /* PCIE instantaneous bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_inst;
+
+ /* PCIE L0 to recovery state transition accumulated count */
+ uint64_t pcie_l0_to_recov_count_acc;
+
+ /* PCIE replay accumulated count */
+ uint64_t pcie_replay_count_acc;
+
+ /* PCIE replay rollover accumulated count */
+ uint64_t pcie_replay_rover_count_acc;
+
+ /* PCIE NAK sent accumulated count */
+ uint32_t pcie_nak_sent_count_acc;
+
+ /* PCIE NAK received accumulated count */
+ uint32_t pcie_nak_rcvd_count_acc;
+
+ /* XGMI accumulated data transfer size (KiloBytes) */
+ uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS];
+ uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS];
+
+ /* PMFW attached timestamp (10ns resolution) */
+ uint64_t firmware_timestamp;
+
+ /* Current clocks (MHz) */
+ uint16_t current_gfxclk[MAX_GFX_CLKS];
+ uint16_t current_socclk[MAX_CLKS];
+ uint16_t current_vclk0[MAX_CLKS];
+ uint16_t current_dclk0[MAX_CLKS];
+ uint16_t current_uclk;
+
+ uint16_t padding;
+};
+
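A hedged consumer-side sketch of validating a raw metrics buffer before treating it as v1.5; it relies only on struct metrics_table_header (structure size plus format/content revisions) defined earlier in this header, and the function name and the assumption that format 1 / content 5 tags this struct are illustrative:

static struct gpu_metrics_v1_5 *as_gpu_metrics_v1_5(void *table, uint32_t size)
{
	struct metrics_table_header *hdr = table;

	/* assume format 1 / content 5 identifies gpu_metrics_v1_5 */
	if (size < sizeof(struct gpu_metrics_v1_5) ||
	    hdr->format_revision != 1 || hdr->content_revision != 5)
		return NULL;

	return table;
}
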
/*
* gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v2_1 or later instead.
@@ -1086,6 +1166,10 @@ struct gpu_metrics_v3_0 {
uint16_t average_dram_reads;
/* time filtered DRAM write bandwidth [MB/sec] */
uint16_t average_dram_writes;
+ /* time filtered IPU read bandwidth [MB/sec] */
+ uint16_t average_ipu_reads;
+ /* time filtered IPU write bandwidth [MB/sec] */
+ uint16_t average_ipu_writes;
/* Driver attached timestamp (in ns) */
uint64_t system_clock_counter;
@@ -1105,6 +1189,8 @@ struct gpu_metrics_v3_0 {
uint32_t average_all_core_power;
/* calculated core power [mW] */
uint16_t average_core_power[16];
+ /* time filtered total system power [mW] */
+ uint16_t average_sys_power;
/* maximum IRM defined STAPM power limit [mW] */
uint16_t stapm_power_limit;
/* time filtered STAPM power limit [mW] */
@@ -1117,6 +1203,8 @@ struct gpu_metrics_v3_0 {
uint16_t average_ipuclk_frequency;
uint16_t average_fclk_frequency;
uint16_t average_vclk_frequency;
+ uint16_t average_uclk_frequency;
+ uint16_t average_mpipu_frequency;
/* Current clocks */
/* target core frequency [MHz] */
@@ -1126,6 +1214,15 @@ struct gpu_metrics_v3_0 {
/* GFXCLK frequency limit enforced on GFX [MHz] */
uint16_t current_gfx_maxfreq;
+ /* Throttle Residency (ASIC dependent) */
+ uint32_t throttle_residency_prochot;
+ uint32_t throttle_residency_spl;
+ uint32_t throttle_residency_fppt;
+ uint32_t throttle_residency_sppt;
+ uint32_t throttle_residency_thm_core;
+ uint32_t throttle_residency_thm_gfx;
+ uint32_t throttle_residency_thm_soc;
+
/* Metrics table alpha filter time constant [us] */
uint32_t time_filter_alphavalue;
};
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index b1db2b190187..ec5b9ab67c5e 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -232,6 +232,7 @@ union MESAPI_SET_HW_RESOURCES {
};
uint32_t oversubscription_timer;
uint64_t doorbell_info;
+ uint64_t event_intr_history_gpu_mc_ptr;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -571,7 +572,8 @@ struct SET_SHADER_DEBUGGER {
struct {
uint32_t single_memop : 1; /* SQ_DEBUG.single_memop */
uint32_t single_alu_op : 1; /* SQ_DEBUG.single_alu_op */
- uint32_t reserved : 30;
+ uint32_t reserved : 29;
+ uint32_t process_ctx_flush : 1;
};
uint32_t u32all;
} flags;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 97b40c5fa1ff..6627ee07d52d 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -616,6 +616,16 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
enable ? "enable" : "disable", ret);
}
+void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
+ enable ? "enable" : "disable", ret);
+}
+
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e1497296afee..087d57850304 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2168,7 +2168,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (amdgpu_dpm_is_overdrive_supported(adev))
*states = ATTR_STATE_SUPPORTED;
} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
- if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1))
+ if ((adev->flags & AMD_IS_APU &&
+ gc_ver != IP_VERSION(9, 4, 3)) ||
+ gc_ver == IP_VERSION(9, 0, 1))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pcie_bw)) {
/* PCIe Perf counters won't work on APU nodes */
@@ -2238,10 +2240,10 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) {
if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE)
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_mclk_od)) {
+ } else if (DEVICE_ATTR_IS(pp_mclk_od)) {
if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_sclk_od)) {
+ } else if (DEVICE_ATTR_IS(pp_sclk_od)) {
if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
@@ -4347,11 +4349,19 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
seq_printf(m, "\t%u mV (VDDNB)\n", value);
size = sizeof(uint32_t);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
- seq_printf(m, "\t%u.%02u W (average GPU)\n", query >> 8, query & 0xff);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) {
+ if (adev->flags & AMD_IS_APU)
+ seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff);
+ else
+ seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff);
+ }
size = sizeof(uint32_t);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size))
- seq_printf(m, "\t%u.%02u W (current GPU)\n", query >> 8, query & 0xff);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) {
+ if (adev->flags & AMD_IS_APU)
+ seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff);
+ else
+ seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff);
+ }
size = sizeof(value);
seq_printf(m, "\n");
@@ -4377,9 +4387,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* VCN clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
if (!value) {
- seq_printf(m, "VCN: Disabled\n");
+ seq_printf(m, "VCN: Powered down\n");
} else {
- seq_printf(m, "VCN: Enabled\n");
+ seq_printf(m, "VCN: Powered up\n");
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
@@ -4391,9 +4401,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* UVD clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
if (!value) {
- seq_printf(m, "UVD: Disabled\n");
+ seq_printf(m, "UVD: Powered down\n");
} else {
- seq_printf(m, "UVD: Enabled\n");
+ seq_printf(m, "UVD: Powered up\n");
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
@@ -4405,9 +4415,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* VCE clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
if (!value) {
- seq_printf(m, "VCE: Disabled\n");
+ seq_printf(m, "VCE: Powered down\n");
} else {
- seq_printf(m, "VCE: Enabled\n");
+ seq_printf(m, "VCE: Powered up\n");
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
}
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index d76b0a60db44..3047ffe7f244 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -445,6 +445,7 @@ void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev);
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
+void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable);
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable);
int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 5d28c951a319..5cb4725c773f 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -2735,10 +2735,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(adev->pm.dpm.ps);
+ if (ps == NULL)
return -ENOMEM;
- }
adev->pm.dpm.ps[i].ps_priv = ps;
k = 0;
idx = (u8 *)&power_state->v2.clockInfoIndex[0];
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index 81fb4e5dd804..60377747bab4 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -272,10 +272,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -283,10 +281,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -294,10 +290,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -305,10 +299,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
@@ -339,10 +331,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
kcalloc(psl->ucNumEntries,
sizeof(struct amdgpu_phase_shedding_limits_entry),
GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
return -ENOMEM;
- }
entry = &psl->entries[0];
for (i = 0; i < psl->ucNumEntries; i++) {
@@ -383,10 +373,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
ATOM_PPLIB_CAC_Leakage_Record *entry;
u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries)
return -ENOMEM;
- }
entry = &cac_table->entries[0];
for (i = 0; i < cac_table->ucNumEntries; i++) {
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
@@ -438,10 +426,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -493,10 +479,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -525,10 +509,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -548,10 +530,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(ext_hdr->usPPMTableOffset));
adev->pm.dpm.dyn_state.ppm_table =
kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.ppm_table) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.ppm_table)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
le16_to_cpu(ppm->usCpuCoreNumber);
@@ -583,10 +563,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -606,10 +584,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
ATOM_PowerTune_Table *pt;
adev->pm.dpm.dyn_state.cac_tdp_table =
kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.cac_tdp_table)
return -ENOMEM;
- }
if (rev > 0) {
ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
(mode_info->atom_context->bios + data_offset +
@@ -645,10 +621,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
ret = amdgpu_parse_clk_voltage_dep_table(
&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
dep_table);
- if (ret) {
- kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
+ if (ret)
return ret;
- }
}
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index fc8e4ac6c8e7..df4f20293c16 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7379,10 +7379,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
kcalloc(4,
sizeof(struct amdgpu_clock_voltage_dependency_entry),
GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
return -ENOMEM;
- }
+
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
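
The pattern across the kv_dpm, legacy_dpm, and si_dpm hunks above is the same: the error paths stop freeing the partially built tables locally and simply return, which avoids double frees as long as cleanup happens exactly once in the caller. A sketch of the caller-side shape this implies (hedged; the actual call site is not shown in these hunks):

ret = amdgpu_parse_extended_power_table(adev);
if (ret) {
	/* single cleanup point: frees whatever the parser built */
	amdgpu_free_extended_power_table(adev);
	return ret;
}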
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
index f2a55c1413f5..17882f8dfdd3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
@@ -200,7 +200,7 @@ static int get_platform_power_management_table(
struct pp_hwmgr *hwmgr,
ATOM_Tonga_PPM_Table *atom_ppm_table)
{
- struct phm_ppm_table *ptr = kzalloc(sizeof(ATOM_Tonga_PPM_Table), GFP_KERNEL);
+ struct phm_ppm_table *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 11372fcc59c8..aa91730e4eaf 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -2974,6 +2974,8 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
result = smu7_get_evv_voltages(hwmgr);
if (result) {
pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
+ kfree(hwmgr->backend);
+ hwmgr->backend = NULL;
return -EINVAL;
}
} else {
@@ -3019,8 +3021,10 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
}
result = smu7_update_edc_leakage_table(hwmgr);
- if (result)
+ if (result) {
+ smu7_hwmgr_backend_fini(hwmgr);
return result;
+ }
return 0;
}
@@ -3995,6 +3999,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
uint32_t sclk, mclk, activity_percent;
uint32_t offset, val_vid;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
@@ -4038,7 +4043,21 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
- return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
+ if ((adev->asic_type != CHIP_HAWAII) &&
+ (adev->asic_type != CHIP_BONAIRE) &&
+ (adev->asic_type != CHIP_FIJI) &&
+ (adev->asic_type != CHIP_TONGA))
+ return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
+ else
+ return -EOPNOTSUPP;
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+ if ((adev->asic_type != CHIP_HAWAII) &&
+ (adev->asic_type != CHIP_BONAIRE) &&
+ (adev->asic_type != CHIP_FIJI) &&
+ (adev->asic_type != CHIP_TONGA))
+ return -EOPNOTSUPP;
+ else
+ return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
case AMDGPU_PP_SENSOR_VDDGFX:
if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
(VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
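
The two mirrored case blocks above encode one rule: the older smu7 parts (Bonaire, Hawaii, Fiji, Tonga) expose only an average power reading, while the newer parts expose only an instantaneous input power reading, so each sensor returns -EOPNOTSUPP on the other family. The predicate both branches share, as a sketch (function name is illustrative):

static bool smu7_reports_avg_power_only(enum amd_asic_type type)
{
	return type == CHIP_HAWAII || type == CHIP_BONAIRE ||
	       type == CHIP_FIJI   || type == CHIP_TONGA;
}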
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
index 9e4228232f02..ad1fd3150d03 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
@@ -2298,6 +2298,7 @@ static uint32_t ci_get_mac_definition(uint32_t value)
case SMU_MAX_ENTRIES_SMIO:
return SMU7_MAX_ENTRIES_SMIO;
case SMU_MAX_LEVELS_VDDC:
+ case SMU_MAX_LEVELS_VDDGFX:
return SMU7_MAX_LEVELS_VDDC;
case SMU_MAX_LEVELS_VDDCI:
return SMU7_MAX_LEVELS_VDDCI;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 97d9802fe673..17d2f5bff4a7 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
@@ -2263,6 +2263,7 @@ static uint32_t iceland_get_mac_definition(uint32_t value)
case SMU_MAX_ENTRIES_SMIO:
return SMU71_MAX_ENTRIES_SMIO;
case SMU_MAX_LEVELS_VDDC:
+ case SMU_MAX_LEVELS_VDDGFX:
return SMU71_MAX_LEVELS_VDDC;
case SMU_MAX_LEVELS_VDDCI:
return SMU71_MAX_LEVELS_VDDCI;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index f464610a959f..c16703868e5c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1322,6 +1322,187 @@ static int smu_get_thermal_temperature_range(struct smu_context *smu)
return ret;
}
+/**
+ * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
+ *
+ * @smu: smu_context pointer
+ *
+ * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
+ * Returns 0 on success, error on failure.
+ */
+static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
+{
+ struct wbrf_ranges_in_out wbrf_exclusion = {0};
+ struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
+ uint64_t start, end;
+ int ret, i, j;
+
+ ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
+ if (ret) {
+ dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
+ return ret;
+ }
+
+ /*
+ * The exclusion ranges array we got might be filled with holes and duplicate
+ * entries. For example:
+ * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
+ * We need to do some cleanup to eliminate those holes and duplicate entries.
+ * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
+ */
+ for (i = 0; i < num_of_wbrf_ranges; i++) {
+ start = wifi_bands[i].start;
+ end = wifi_bands[i].end;
+
+ /* get the last valid entry to fill the intermediate hole */
+ if (!start && !end) {
+ for (j = num_of_wbrf_ranges - 1; j > i; j--)
+ if (wifi_bands[j].start && wifi_bands[j].end)
+ break;
+
+ /* no valid entry left */
+ if (j <= i)
+ break;
+
+ start = wifi_bands[i].start = wifi_bands[j].start;
+ end = wifi_bands[i].end = wifi_bands[j].end;
+ wifi_bands[j].start = 0;
+ wifi_bands[j].end = 0;
+ num_of_wbrf_ranges = j;
+ }
+
+ /* eliminate duplicate entries */
+ for (j = i + 1; j < num_of_wbrf_ranges; j++) {
+ if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
+ wifi_bands[j].start = 0;
+ wifi_bands[j].end = 0;
+ }
+ }
+ }
+
+ /* Send the sorted wifi_bands to PMFW */
+ ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
+ /* Try to set the wifi_bands again */
+ if (unlikely(ret == -EBUSY)) {
+ mdelay(5);
+ ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
+ }
+
+ return ret;
+}
+
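For reference, the hole-filling and de-duplication pass above can be exercised in isolation. This is a self-contained userspace sketch (struct freq_band_range is re-declared locally for illustration), fed with the example from the comment:

#include <stdint.h>
#include <stdio.h>

struct freq_band_range { uint64_t start, end; };

static void compact_ranges(struct freq_band_range *b, int n)
{
	for (int i = 0; i < n; i++) {
		if (!b[i].start && !b[i].end) {
			int j;

			/* pull the last valid entry into this hole */
			for (j = n - 1; j > i; j--)
				if (b[j].start && b[j].end)
					break;
			if (j <= i)
				break;	/* no valid entry left */
			b[i] = b[j];
			b[j].start = b[j].end = 0;
			n = j;
		}
		/* zero out duplicates of the entry settled at slot i */
		for (int j = i + 1; j < n; j++)
			if (b[j].start == b[i].start && b[j].end == b[i].end)
				b[j].start = b[j].end = 0;
	}
}

int main(void)
{
	struct freq_band_range in[] = {
		{2400, 2500}, {0, 0}, {6882, 6962},
		{2400, 2500}, {0, 0}, {6117, 6189}, {0, 0},
	};

	compact_ranges(in, 7);
	for (int i = 0; i < 7; i++)	/* prints (2400, 2500), (6117, 6189),
					 * (6882, 6962), then (0, 0) tail */
		printf("(%llu, %llu)\n",
		       (unsigned long long)in[i].start,
		       (unsigned long long)in[i].end);
	return 0;
}
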
+/**
+ * smu_wbrf_event_handler - handle wbrf notifier events
+ *
+ * @nb: notifier block
+ * @action: event type
+ * @_arg: event data
+ *
+ * Calls relevant amdgpu function in response to wbrf event
+ * notification from kernel.
+ */
+static int smu_wbrf_event_handler(struct notifier_block *nb,
+ unsigned long action, void *_arg)
+{
+ struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);
+
+ switch (action) {
+ case WBRF_CHANGED:
+ schedule_delayed_work(&smu->wbrf_delayed_work,
+ msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+/**
+ * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
+ *
+ * @work: struct work_struct pointer
+ *
+ * Called once the event flood has settled; consume the latest exclusion ranges.
+ */
+static void smu_wbrf_delayed_work_handler(struct work_struct *work)
+{
+ struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);
+
+ smu_wbrf_handle_exclusion_ranges(smu);
+}
+
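One property worth noting about the pairing above: schedule_delayed_work() is a no-op while the work item is still pending, so a burst of WBRF_CHANGED notifications inside one SMU_WBRF_EVENT_HANDLING_PACE window collapses into a single handler run. In sketch form (pace assumed to be 10 ms here):

/*
 * t (ms):     0    2    4          10         20          30
 * notifier:   E    E    E                     E
 * queued:     #    -    -                     #
 * handler:                         R                      R
 *
 * E = WBRF_CHANGED event, # = work queued, - = already pending (no-op),
 * R = smu_wbrf_delayed_work_handler() runs.
 */
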
+/**
+ * smu_wbrf_support_check - check wbrf support
+ *
+ * @smu: smu_context pointer
+ *
+ * Checks via the ACPI interface whether wbrf is supported.
+ */
+static void smu_wbrf_support_check(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
+ acpi_amd_wbrf_supported_consumer(adev->dev);
+
+ if (smu->wbrf_supported)
+ dev_info(adev->dev, "RF interference mitigation is supported\n");
+}
+
+/**
+ * smu_wbrf_init - init driver wbrf support
+ *
+ * @smu: smu_context pointer
+ *
+ * Verifies the AMD ACPI interfaces and registers with the wbrf
+ * notifier chain if the wbrf feature is supported.
+ * Returns 0 on success, error on failure.
+ */
+static int smu_wbrf_init(struct smu_context *smu)
+{
+ int ret;
+
+ if (!smu->wbrf_supported)
+ return 0;
+
+ INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);
+
+ smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
+ ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
+ if (ret)
+ return ret;
+
+ /*
+ * Some wifiband exclusion ranges may already be in place before
+ * our driver loads. Schedule the work below so the driver picks
+ * up those pre-existing exclusion ranges as well.
+ */
+ schedule_delayed_work(&smu->wbrf_delayed_work,
+ msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
+
+ return 0;
+}
+
+/**
+ * smu_wbrf_fini - tear down driver wbrf support
+ *
+ * @smu: smu_context pointer
+ *
+ * Unregisters with the wbrf notifier chain.
+ */
+static void smu_wbrf_fini(struct smu_context *smu)
+{
+ if (!smu->wbrf_supported)
+ return;
+
+ amd_wbrf_unregister_notifier(&smu->wbrf_notifier);
+
+ cancel_delayed_work_sync(&smu->wbrf_delayed_work);
+}
+
static int smu_smc_hw_setup(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
@@ -1414,6 +1595,15 @@ static int smu_smc_hw_setup(struct smu_context *smu)
if (ret)
return ret;
+ /* Enable UclkShadow when wbrf is supported */
+ if (smu->wbrf_supported) {
+ ret = smu_enable_uclk_shadow(smu, true);
+ if (ret) {
+ dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
+ return ret;
+ }
+ }
+
/*
* With SCPM enabled, these actions (and the relevant messages) are
* neither needed nor permitted.
@@ -1512,6 +1702,15 @@ static int smu_smc_hw_setup(struct smu_context *smu)
*/
ret = smu_set_min_dcef_deep_sleep(smu,
smu->smu_table.boot_values.dcefclk / 100);
+ if (ret) {
+ dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
+ return ret;
+ }
+
+ /* Init wbrf support. Properly set up the notifier */
+ ret = smu_wbrf_init(smu);
+ if (ret)
+ dev_err(adev->dev, "Error during wbrf init call\n");
return ret;
}
@@ -1567,6 +1766,13 @@ static int smu_hw_init(void *handle)
return ret;
}
+ /*
+ * Check whether wbrf is supported. This needs to be done
+ * before SMU setup starts since part of SMU configuration
+ * relies on this.
+ */
+ smu_wbrf_support_check(smu);
+
if (smu->is_apu) {
ret = smu_set_gfx_imu_enable(smu);
if (ret)
@@ -1733,6 +1939,8 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ smu_wbrf_fini(smu);
+
cancel_work_sync(&smu->throttling_logging_work);
cancel_work_sync(&smu->interrupt_work);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index c125253df20b..2aa4fea87314 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -22,6 +22,9 @@
#ifndef __AMDGPU_SMU_H__
#define __AMDGPU_SMU_H__
+#include <linux/acpi_amd_wbrf.h>
+#include <linux/units.h>
+
#include "amdgpu.h"
#include "kgd_pp_interface.h"
#include "dm_pp_interface.h"
@@ -318,6 +321,7 @@ enum smu_table_id {
SMU_TABLE_PACE,
SMU_TABLE_ECCINFO,
SMU_TABLE_COMBO_PPTABLE,
+ SMU_TABLE_WIFIBAND,
SMU_TABLE_COUNT,
};
@@ -471,6 +475,12 @@ struct stb_context {
#define WORKLOAD_POLICY_MAX 7
+/*
+ * Pace wbrf event handling: at most one event is processed per
+ * SMU_WBRF_EVENT_HANDLING_PACE ms.
+ */
+#define SMU_WBRF_EVENT_HANDLING_PACE 10
+
struct smu_context {
struct amdgpu_device *adev;
struct amdgpu_irq_src irq_source;
@@ -570,6 +580,11 @@ struct smu_context {
struct delayed_work swctf_delayed_work;
enum pp_xgmi_plpd_mode plpd_mode;
+
+ /* data structures for wbrf feature support */
+ bool wbrf_supported;
+ struct notifier_block wbrf_notifier;
+ struct delayed_work wbrf_delayed_work;
};
struct i2c_adapter;
@@ -1375,6 +1390,22 @@ struct pptable_funcs {
* @notify_rlc_state: Notify RLC power state to SMU.
*/
int (*notify_rlc_state)(struct smu_context *smu, bool en);
+
+ /**
+ * @is_asic_wbrf_supported: check whether PMFW supports the wbrf feature
+ */
+ bool (*is_asic_wbrf_supported)(struct smu_context *smu);
+
+ /**
+ * @enable_uclk_shadow: Enable the uclk shadow feature when wbrf is supported
+ */
+ int (*enable_uclk_shadow)(struct smu_context *smu, bool enable);
+
+ /**
+ * @set_wbrf_exclusion_ranges: notify SMU of the occupied wifi bands
+ */
+ int (*set_wbrf_exclusion_ranges)(struct smu_context *smu,
+ struct freq_band_range *exclusion_ranges);
};
typedef enum {
@@ -1418,6 +1449,16 @@ typedef enum {
METRICS_PCIE_WIDTH,
METRICS_CURR_FANPWM,
METRICS_CURR_SOCKETPOWER,
+ METRICS_AVERAGE_VPECLK,
+ METRICS_AVERAGE_IPUCLK,
+ METRICS_AVERAGE_MPIPUCLK,
+ METRICS_THROTTLER_RESIDENCY_PROCHOT,
+ METRICS_THROTTLER_RESIDENCY_SPL,
+ METRICS_THROTTLER_RESIDENCY_FPPT,
+ METRICS_THROTTLER_RESIDENCY_SPPT,
+ METRICS_THROTTLER_RESIDENCY_THM_CORE,
+ METRICS_THROTTLER_RESIDENCY_THM_GFX,
+ METRICS_THROTTLER_RESIDENCY_THM_SOC,
} MetricsMember_t;
enum smu_cmn2asic_mapping_type {
@@ -1491,6 +1532,17 @@ enum smu_baco_seq {
__dst_size); \
})
+typedef struct {
+ uint16_t LowFreq;
+ uint16_t HighFreq;
+} WifiOneBand_t;
+
+typedef struct {
+ uint32_t WifiBandEntryNum;
+ WifiOneBand_t WifiBandEntry[11];
+ uint32_t MmHubPadding[8];
+} WifiBandEntryTable_t;
+
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
uint32_t *limit,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index 9dd1ed5b8940..b114d14fc053 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -1615,7 +1615,8 @@ typedef struct {
#define TABLE_I2C_COMMANDS 9
#define TABLE_DRIVER_INFO 10
#define TABLE_ECCINFO 11
-#define TABLE_COUNT 12
+#define TABLE_WIFIBAND 12
+#define TABLE_COUNT 13
//IH Interrupt ID
#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
index 62b7c0daff68..8b1496f8ce58 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
@@ -1605,7 +1605,8 @@ typedef struct {
#define TABLE_I2C_COMMANDS 9
#define TABLE_DRIVER_INFO 10
#define TABLE_ECCINFO 11
-#define TABLE_COUNT 12
+#define TABLE_WIFIBAND 12
+#define TABLE_COUNT 13
//IH Interrupt ID
#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 22f88842a7fd..5bb7a63c0602 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -24,11 +24,6 @@
#ifndef SMU14_DRIVER_IF_V14_0_0_H
#define SMU14_DRIVER_IF_V14_0_0_H
-// *** IMPORTANT ***
-// SMU TEAM: Always increment the interface version if
-// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 6
-
typedef struct {
int32_t value;
uint32_t numFractionalBits;
@@ -150,37 +145,50 @@ typedef struct {
} DpmClocks_t;
typedef struct {
- uint16_t CoreFrequency[16]; //Target core frequency [MHz]
- uint16_t CorePower[16]; //CAC calculated core power [mW]
- uint16_t CoreTemperature[16]; //TSEN measured core temperature [centi-C]
- uint16_t GfxTemperature; //TSEN measured GFX temperature [centi-C]
- uint16_t SocTemperature; //TSEN measured SOC temperature [centi-C]
- uint16_t StapmOpnLimit; //Maximum IRM defined STAPM power limit [mW]
- uint16_t StapmCurrentLimit; //Time filtered STAPM power limit [mW]
- uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz]
- uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz]
- uint16_t SkinTemp; //Maximum skin temperature reported by APU and HS2 chassis sensors [centi-C]
- uint16_t GfxclkFrequency; //Time filtered target GFXCLK frequency [MHz]
- uint16_t FclkFrequency; //Time filtered target FCLK frequency [MHz]
- uint16_t GfxActivity; //Time filtered GFX busy % [0-100]
- uint16_t SocclkFrequency; //Time filtered target SOCCLK frequency [MHz]
- uint16_t VclkFrequency; //Time filtered target VCLK frequency [MHz]
- uint16_t VcnActivity; //Time filtered VCN busy % [0-100]
- uint16_t VpeclkFrequency; //Time filtered target VPECLK frequency [MHz]
- uint16_t IpuclkFrequency; //Time filtered target IPUCLK frequency [MHz]
- uint16_t IpuBusy[8]; //Time filtered IPU per-column busy % [0-100]
- uint16_t DRAMReads; //Time filtered DRAM read bandwidth [MB/sec]
- uint16_t DRAMWrites; //Time filtered DRAM write bandwidth [MB/sec]
- uint16_t CoreC0Residency[16]; //Time filtered per-core C0 residency % [0-100]
- uint16_t IpuPower; //Time filtered IPU power [mW]
- uint32_t ApuPower; //Time filtered APU power [mW]
- uint32_t GfxPower; //Time filtered GFX power [mW]
- uint32_t dGpuPower; //Time filtered dGPU power [mW]
- uint32_t SocketPower; //Time filtered power used for PPT/STAPM [APU+dGPU] [mW]
- uint32_t AllCorePower; //Time filtered sum of core power across all cores in the socket [mW]
- uint32_t FilterAlphaValue; //Metrics table alpha filter time constant [us]
- uint32_t MetricsCounter; //Counter that is incremented on every metrics table update [PM_TIMER cycles]
- uint32_t spare[16];
+ uint16_t CoreFrequency[16]; //Target core frequency [MHz]
+ uint16_t CorePower[16]; //CAC calculated core power [mW]
+ uint16_t CoreTemperature[16]; //TSEN measured core temperature [centi-C]
+ uint16_t GfxTemperature; //TSEN measured GFX temperature [centi-C]
+ uint16_t SocTemperature; //TSEN measured SOC temperature [centi-C]
+ uint16_t StapmOpnLimit; //Maximum IRM defined STAPM power limit [mW]
+ uint16_t StapmCurrentLimit; //Time filtered STAPM power limit [mW]
+ uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz]
+ uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz]
+ uint16_t SkinTemp; //Maximum skin temperature reported by APU and HS2 chassis sensors [centi-C]
+ uint16_t GfxclkFrequency; //Time filtered target GFXCLK frequency [MHz]
+ uint16_t FclkFrequency; //Time filtered target FCLK frequency [MHz]
+ uint16_t GfxActivity; //Time filtered GFX busy % [0-100]
+ uint16_t SocclkFrequency; //Time filtered target SOCCLK frequency [MHz]
+ uint16_t VclkFrequency; //Time filtered target VCLK frequency [MHz]
+ uint16_t VcnActivity; //Time filtered VCN busy % [0-100]
+ uint16_t VpeclkFrequency; //Time filtered target VPECLK frequency [MHz]
+ uint16_t IpuclkFrequency; //Time filtered target IPUCLK frequency [MHz]
+ uint16_t IpuBusy[8]; //Time filtered IPU per-column busy % [0-100]
+ uint16_t DRAMReads; //Time filtered DRAM read bandwidth [MB/sec]
+ uint16_t DRAMWrites; //Time filtered DRAM write bandwidth [MB/sec]
+ uint16_t CoreC0Residency[16]; //Time filtered per-core C0 residency % [0-100]
+ uint16_t IpuPower; //Time filtered IPU power [mW]
+ uint32_t ApuPower; //Time filtered APU power [mW]
+ uint32_t GfxPower; //Time filtered GFX power [mW]
+ uint32_t dGpuPower; //Time filtered dGPU power [mW]
+ uint32_t SocketPower; //Time filtered power used for PPT/STAPM [APU+dGPU] [mW]
+ uint32_t AllCorePower; //Time filtered sum of core power across all cores in the socket [mW]
+ uint32_t FilterAlphaValue; //Metrics table alpha filter time constant [us]
+ uint32_t MetricsCounter; //Counter that is incremented on every metrics table update [PM_TIMER cycles]
+ uint16_t MemclkFrequency; //Time filtered target MEMCLK frequency [MHz]
+ uint16_t MpipuclkFrequency; //Time filtered target MPIPUCLK frequency [MHz]
+ uint16_t IpuReads; //Time filtered IPU read bandwidth [MB/sec]
+ uint16_t IpuWrites; //Time filtered IPU write bandwidth [MB/sec]
+ uint32_t ThrottleResidency_PROCHOT; //Counter that is incremented on every metrics table update when PROCHOT was engaged [PM_TIMER cycles]
+ uint32_t ThrottleResidency_SPL; //Counter that is incremented on every metrics table update when SPL was engaged [PM_TIMER cycles]
+ uint32_t ThrottleResidency_FPPT; //Counter that is incremented on every metrics table update when fast PPT was engaged [PM_TIMER cycles]
+ uint32_t ThrottleResidency_SPPT; //Counter that is incremented on every metrics table update when slow PPT was engaged [PM_TIMER cycles]
+ uint32_t ThrottleResidency_THM_CORE; //Counter that is incremented on every metrics table update when CORE thermal throttling was engaged [PM_TIMER cycles]
+ uint32_t ThrottleResidency_THM_GFX; //Counter that is incremented on every metrics table update when GFX thermal throttling was engaged [PM_TIMER cycles]
+ uint32_t ThrottleResidency_THM_SOC; //Counter that is incremented on every metrics table update when SOC thermal throttling was engaged [PM_TIMER cycles]
+ uint16_t Psys; //Time filtered Psys power [mW]
+ uint16_t spare1;
+ uint32_t spare[6];
} SmuMetrics_t;
//ISP tile definitions
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
index e2ee855c7748..e862d323caab 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
@@ -138,10 +138,9 @@
#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A
#define PPSMC_MSG_SetPriorityDeltaGain 0x4B
#define PPSMC_MSG_AllowIHHostInterrupt 0x4C
-
#define PPSMC_MSG_DALNotPresent 0x4E
-
-#define PPSMC_Message_Count 0x4F
+#define PPSMC_MSG_EnableUCLKShadow 0x51
+#define PPSMC_Message_Count 0x52
//Debug Dump Message
#define DEBUGSMC_MSG_TestMessage 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index fef2d290f3f2..7b812b9994d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0x9
+#define SMU_METRICS_TABLE_VERSION 0xB
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -219,7 +219,103 @@ typedef struct __attribute__((packed, aligned(4))) {
uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated
uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated
uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated
-} MetricsTable_t;
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[32];
+} MetricsTableX_t;
+
+typedef struct __attribute__((packed, aligned(4))) {
+ uint32_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t MaxHbmTemperature;
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t MaxHbmTemperatureAcc;
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t MaxSocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t CcdEnergyAcc;
+ uint64_t XcdEnergyAcc;
+ uint64_t AidEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t CclkFrequencyLimit;
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency;
+ uint32_t UclkFrequency;
+ uint32_t SocclkFrequency[4];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[4];
+ uint64_t GfxclkFrequencyAcc[8];
+ uint64_t CclkFrequencyAcc[96];
+
+ //FREQUENCY RANGE
+ uint32_t MaxCclkFrequency;
+ uint32_t MinCclkFrequency;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+
+ //XGMI
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc[8];
+ uint64_t XgmiWriteBandwidthAcc[8];
+
+ //ACTIVITY
+ uint32_t SocketC0Residency;
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketC0ResidencyAcc;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[4];
+
+ //THROTTLERS
+ uint32_t ProchotResidencyAcc;
+ uint32_t PptResidencyAcc;
+ uint32_t SocketThmResidencyAcc;
+ uint32_t VrThmResidencyAcc;
+ uint32_t HbmThmResidencyAcc;
+ uint32_t GfxLockXCDMak;
+
+ // New Items at end to maintain driver compatibility
+ uint32_t GfxclkFrequency[8];
+
+ //PSNs
+ uint64_t PublicSerialNumber_AID[4];
+ uint64_t PublicSerialNumber_XCD[8];
+ uint64_t PublicSerialNumber_CCD[12];
+
+ //XGMI data transfer size
+ uint64_t XgmiReadDataSizeAcc[8]; //in KByte
+ uint64_t XgmiWriteDataSizeAcc[8]; //in KByte
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[32];
+} MetricsTableA_t;
#define SMU_VF_METRICS_TABLE_VERSION 0x3
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h
index 6aaefca9b595..a6bf9cdd130e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h
@@ -134,6 +134,7 @@
#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A
#define PPSMC_MSG_SetPriorityDeltaGain 0x4B
#define PPSMC_MSG_AllowIHHostInterrupt 0x4C
-#define PPSMC_Message_Count 0x4D
+#define PPSMC_MSG_EnableUCLKShadow 0x51
+#define PPSMC_Message_Count 0x52
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index ea8ea99b7436..953a767613b1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -260,7 +260,8 @@
__SMU_DUMMY_MAP(PowerDownUmsch), \
__SMU_DUMMY_MAP(SetSoftMaxVpe), \
__SMU_DUMMY_MAP(SetSoftMinVpe), \
- __SMU_DUMMY_MAP(GetMetricsVersion),
+ __SMU_DUMMY_MAP(GetMetricsVersion), \
+ __SMU_DUMMY_MAP(EnableUCLKShadow),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
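
Aside: smu_types.h builds the SMU_MSG_* enum with an X-macro, which is why adding EnableUCLKShadow is a one-line change here plus a MSG_MAP() entry per ASIC. A self-contained illustration of the pattern (demo names only, compiles as plain C):

	#include <stdio.h>

	#define DEMO_MSG_LIST \
		DEMO_MAP(GetMetricsVersion) \
		DEMO_MAP(EnableUCLKShadow)

	/* First expansion: enum values. */
	#define DEMO_MAP(type) DEMO_MSG_##type,
	enum demo_msg { DEMO_MSG_LIST DEMO_MSG_COUNT };
	#undef DEMO_MAP

	/* Second expansion: matching name strings. */
	#define DEMO_MAP(type) #type,
	static const char * const demo_msg_names[] = { DEMO_MSG_LIST };
	#undef DEMO_MAP

	int main(void)
	{
		printf("%d %s\n", DEMO_MSG_EnableUCLKShadow,
		       demo_msg_names[DEMO_MSG_EnableUCLKShadow]);
		return 0;
	}
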
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 48f5926d8153..fbd57fa1a004 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -212,10 +212,6 @@ int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
bool smu_v13_0_baco_is_support(struct smu_context *smu);
-enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu);
-
-int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
-
int smu_v13_0_baco_enter(struct smu_context *smu);
int smu_v13_0_baco_exit(struct smu_context *smu);
@@ -298,5 +294,9 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
int smu_v13_0_disable_pmfw_state(struct smu_context *smu);
+int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable);
+
+int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
+ struct freq_band_range *exclusion_ranges);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index a5b569976f19..3f7463c1c1a9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -26,8 +26,8 @@
#include "amdgpu_smu.h"
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x6
#define FEATURE_MASK(feature) (1ULL << feature)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 2cb6b68222ba..4cd43bbec910 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2407,8 +2407,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
.baco_is_support = smu_v11_0_baco_is_support,
- .baco_get_state = smu_v11_0_baco_get_state,
- .baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = smu_v11_0_baco_enter,
.baco_exit = smu_v11_0_baco_exit,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index a38233cc5b7f..8d1d29ffb0f1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -3537,8 +3537,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
.baco_is_support = smu_v11_0_baco_is_support,
- .baco_get_state = smu_v11_0_baco_get_state,
- .baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = navi10_baco_enter,
.baco_exit = navi10_baco_exit,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 1de9f8b5cc5f..21fc033528fa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -4428,8 +4428,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
.baco_is_support = smu_v11_0_baco_is_support,
- .baco_get_state = smu_v11_0_baco_get_state,
- .baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = sienna_cichlid_baco_enter,
.baco_exit = sienna_cichlid_baco_exit,
.mode1_reset_is_support = sienna_cichlid_is_mode1_reset_supported,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index f1440869d1ce..dd9bcbd630a1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1530,7 +1530,6 @@ static int aldebaran_i2c_control_init(struct smu_context *smu)
smu_i2c->port = 0;
mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE;
- control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &aldebaran_i2c_algo;
snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0");
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 86fc7273d588..771a3d457c33 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -2221,33 +2221,14 @@ static int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
return 0;
}
-bool smu_v13_0_baco_is_support(struct smu_context *smu)
-{
- struct smu_baco_context *smu_baco = &smu->smu_baco;
-
- if (amdgpu_sriov_vf(smu->adev) ||
- !smu_baco->platform_support)
- return false;
-
- /* return true if ASIC is in BACO state already */
- if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
- return true;
-
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
- !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
- return false;
-
- return true;
-}
-
-enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu)
+static enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
return smu_baco->state;
}
-int smu_v13_0_baco_set_state(struct smu_context *smu,
+static int smu_v13_0_baco_set_state(struct smu_context *smu,
enum smu_baco_state state)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -2281,6 +2262,24 @@ int smu_v13_0_baco_set_state(struct smu_context *smu,
return ret;
}
+bool smu_v13_0_baco_is_support(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+
+ if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
+ return false;
+
+ /* return true if ASIC is in BACO state already */
+ if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
+ return true;
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+ !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+ return false;
+
+ return true;
+}
+
int smu_v13_0_baco_enter(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -2508,3 +2507,51 @@ int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
return ret == 0 ? 0 : -EINVAL;
}
+
+int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableUCLKShadow, enable, NULL);
+}
+
+int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
+ struct freq_band_range *exclusion_ranges)
+{
+ WifiBandEntryTable_t wifi_bands;
+ int valid_entries = 0;
+ int ret, i;
+
+ memset(&wifi_bands, 0, sizeof(wifi_bands));
+ for (i = 0; i < ARRAY_SIZE(wifi_bands.WifiBandEntry); i++) {
+ if (!exclusion_ranges[i].start && !exclusion_ranges[i].end)
+ break;
+
+ /* PMFW expects the inputs in MHz */
+ wifi_bands.WifiBandEntry[valid_entries].LowFreq =
+ DIV_ROUND_DOWN_ULL(exclusion_ranges[i].start, HZ_PER_MHZ);
+ wifi_bands.WifiBandEntry[valid_entries++].HighFreq =
+ DIV_ROUND_UP_ULL(exclusion_ranges[i].end, HZ_PER_MHZ);
+ }
+ wifi_bands.WifiBandEntryNum = valid_entries;
+
+ /*
+ * Per confirmation with the PMFW team, WifiBandEntryNum = 0
+ * is a valid setting.
+ *
+ * Consider the scenarios below:
+ * - At first the wifi device adds an exclusion range, e.g. (2400, 2500),
+ * to the BIOS and our driver gets notified. We will set
+ * WifiBandEntryNum = 1 and pass the WifiBandEntry (2400, 2500) to PMFW.
+ *
+ * - Later the wifi device removes the wifiband list added above and
+ * our driver gets notified again. At this time, the driver will set
+ * WifiBandEntryNum = 0 and pass an empty WifiBandEntry list to PMFW.
+ *
+ * - PMFW may still need to do some uclk shadow update (e.g. switching
+ * from the shadow clock back to the primary clock) on receiving this.
+ */
+ ret = smu_cmn_update_table(smu, SMU_TABLE_WIFIBAND, 0, &wifi_bands, true);
+ if (ret)
+ dev_warn(smu->adev->dev, "Failed to set wifiband!\n");
+
+ return ret;
+}
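
A usage note on the conversion above: the freq_band_range input is in Hz (as delivered by the wbrf core), while PMFW wants MHz, and rounding the start down and the end up means the excluded window can only widen, never shrink, under integer division. A hedged sketch with hypothetical values:

	/* Hypothetical input: a 2.4-2.5 GHz wifi band in Hz, the unit
	 * delivered by the wbrf notifier payload. */
	struct freq_band_range band = {
		.start = 2400000000ULL,
		.end = 2500000000ULL,
	};
	u16 low = DIV_ROUND_DOWN_ULL(band.start, HZ_PER_MHZ);	/* 2400 */
	u16 high = DIV_ROUND_UP_ULL(band.end, HZ_PER_MHZ);	/* 2500 */

Had the band been, say, 2400000001 Hz to 2499999999 Hz, the pair would still come out as (2400, 2500).
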
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index a4debd7ab8bd..a9b25faa63e4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -169,6 +169,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
MSG_MAP(DALNotPresent, PPSMC_MSG_DALNotPresent, 0),
+ MSG_MAP(EnableUCLKShadow, PPSMC_MSG_EnableUCLKShadow, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -253,6 +254,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(I2C_COMMANDS),
TAB_MAP(ECCINFO),
TAB_MAP(OVERDRIVE),
+ TAB_MAP(WIFIBAND),
};
static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -498,6 +500,9 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_WIFIBAND,
+ sizeof(WifiBandEntryTable_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
if (!smu_table->metrics_table)
@@ -2540,16 +2545,19 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
workload_mask = 1 << workload_type;
- /* Add optimizations for SMU13.0.0. Reuse the power saving profile */
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) &&
- ((smu->adev->pm.fw_version == 0x004e6601) ||
- (smu->adev->pm.fw_version >= 0x004e7400))) {
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- PP_SMC_POWER_PROFILE_POWERSAVING);
- if (workload_type >= 0)
- workload_mask |= 1 << workload_type;
+ /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
+ if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7300))) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ smu->adev->pm.fw_version >= 0x00504500)) {
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ PP_SMC_POWER_PROFILE_POWERSAVING);
+ if (workload_type >= 0)
+ workload_mask |= 1 << workload_type;
+ }
}
return smu_cmn_send_smc_msg_with_param(smu,
@@ -2688,7 +2696,6 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
smu_i2c->port = i;
mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE;
- control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v13_0_0_i2c_algo;
snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
@@ -2938,6 +2945,20 @@ static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu,
return ret;
}
+static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 0):
+ return smu->smc_fw_version >= 0x004e6300;
+ case IP_VERSION(13, 0, 10):
+ return smu->smc_fw_version >= 0x00503300;
+ default:
+ return false;
+ }
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -3003,8 +3024,6 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.deep_sleep_control = smu_v13_0_deep_sleep_control,
.gfx_ulv_control = smu_v13_0_gfx_ulv_control,
.baco_is_support = smu_v13_0_baco_is_support,
- .baco_get_state = smu_v13_0_baco_get_state,
- .baco_set_state = smu_v13_0_baco_set_state,
.baco_enter = smu_v13_0_baco_enter,
.baco_exit = smu_v13_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
@@ -3018,6 +3037,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.gpo_control = smu_v13_0_gpo_control,
.get_ecc_info = smu_v13_0_0_get_ecc_info,
.notify_display_change = smu_v13_0_notify_display_change,
+ .is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check,
+ .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
+ .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index dda2249c4994..3c98a8a0386a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -248,6 +248,8 @@ struct PPTable_t {
#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x) ((x) & 0x3ff)
#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
+#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\
+ (metrics_a->field) : (metrics_x->field))
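
The fields read through GET_METRIC_FIELD() dispatch to MetricsTableA_t on APUs and MetricsTableX_t on dGPUs, both overlaid on the same metrics buffer; the values themselves are Q10 fixed point, which the three macros above decode. A standalone worked example of the rounding (plain C, demo only):

	#include <stdio.h>
	#include <stdint.h>

	/* Restates the Q10 helpers above for a userspace demo. */
	#define SMUQ10_TO_UINT(x) ((x) >> 10)
	#define SMUQ10_FRAC(x) ((x) & 0x3ff)
	#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))

	int main(void)
	{
		uint32_t raw = 1536; /* 1.5 in Q10: (1 << 10) + 512 */

		/* Prints "1 512 2": truncation gives 1, the fraction is
		 * 512/1024 = 0.5, so rounding bumps the result to 2. */
		printf("%u %u %u\n", SMUQ10_TO_UINT(raw),
		       SMUQ10_FRAC(raw), SMUQ10_ROUND(raw));
		return 0;
	}
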
struct smu_v13_0_6_dpm_map {
enum smu_clk_type clk_type;
@@ -330,7 +332,8 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t),
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+ max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
@@ -338,12 +341,13 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
- smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ smu_table->metrics_table = kzalloc(max(sizeof(MetricsTableX_t),
+ sizeof(MetricsTableA_t)), GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_4);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5);
smu_table->gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
@@ -469,9 +473,11 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
+ MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
+ struct amdgpu_device *adev = smu->adev;
int ret, i, retry = 100;
uint32_t table_version;
@@ -483,7 +489,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
return ret;
/* Ensure that metrics have been updated */
- if (metrics->AccumulationCounter)
+ if (GET_METRIC_FIELD(AccumulationCounter))
break;
usleep_range(1000, 1100);
@@ -500,29 +506,29 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
table_version;
pptable->MaxSocketPowerLimit =
- SMUQ10_ROUND(metrics->MaxSocketPowerLimit);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit));
pptable->MaxGfxclkFrequency =
- SMUQ10_ROUND(metrics->MaxGfxclkFrequency);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency));
pptable->MinGfxclkFrequency =
- SMUQ10_ROUND(metrics->MinGfxclkFrequency);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency));
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->FclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]);
pptable->UclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->UclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]);
pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND(
- metrics->SocclkFrequencyTable[i]);
+ GET_METRIC_FIELD(SocclkFrequencyTable)[i]);
pptable->VclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->VclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]);
pptable->DclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->DclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]);
pptable->LclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->LclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]);
}
/* use AID0 serial number by default */
- pptable->PublicSerialNumber_AID = metrics->PublicSerialNumber_AID[0];
+ pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0];
pptable->Init = true;
}
@@ -824,7 +830,8 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
+ MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int xcc_id;
@@ -839,50 +846,50 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_GFXCLK:
if (smu->smc_fw_version >= 0x552F00) {
xcc_id = GET_INST(GC, 0);
- *value = SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
} else {
*value = 0;
}
break;
case METRICS_CURR_SOCCLK:
case METRICS_AVERAGE_SOCCLK:
- *value = SMUQ10_ROUND(metrics->SocclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]);
break;
case METRICS_CURR_UCLK:
case METRICS_AVERAGE_UCLK:
- *value = SMUQ10_ROUND(metrics->UclkFrequency);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
break;
case METRICS_CURR_VCLK:
- *value = SMUQ10_ROUND(metrics->VclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]);
break;
case METRICS_CURR_DCLK:
- *value = SMUQ10_ROUND(metrics->DclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]);
break;
case METRICS_CURR_FCLK:
- *value = SMUQ10_ROUND(metrics->FclkFrequency);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency));
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = SMUQ10_ROUND(metrics->SocketGfxBusy);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
break;
case METRICS_AVERAGE_MEMACTIVITY:
- *value = SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
break;
case METRICS_CURR_SOCKETPOWER:
- *value = SMUQ10_ROUND(metrics->SocketPower) << 8;
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = SMUQ10_ROUND(metrics->MaxSocketTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
- *value = SMUQ10_ROUND(metrics->MaxHbmTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
/* This is the max of all VRs and not just SOC VR.
* No need to define another data type for the same.
*/
case METRICS_TEMPERATURE_VRSOC:
- *value = SMUQ10_ROUND(metrics->MaxVrTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
@@ -963,7 +970,9 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
if (i < (clocks.num_levels - 1))
clk2 = clocks.data[i + 1].clocks_in_khz / 1000;
- if (curr_clk >= clk1 && curr_clk < clk2) {
+ if (curr_clk == clk1) {
+ level = i;
+ } else if (curr_clk >= clk1 && curr_clk < clk2) {
level = (curr_clk - clk1) <= (clk2 - curr_clk) ?
i :
i + 1;
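
Why the new exact-match branch matters: clk2 is only advanced while i < num_levels - 1, so at the top level the old range test degenerates to curr_clk >= clk1 && curr_clk < clk1, which is always false — a clock parked exactly at the highest DPM level never got a level. A standalone sketch of the repaired selection (hypothetical level data):

	#include <stdio.h>

	static int pick_level(const unsigned int *lv, int n, unsigned int curr)
	{
		unsigned int clk1, clk2 = 0;
		int i, level = -1;

		for (i = 0; i < n; i++) {
			clk1 = lv[i];
			if (i < n - 1)
				clk2 = lv[i + 1];

			if (curr == clk1) /* exact hit, incl. the top level */
				level = i;
			else if (curr >= clk1 && curr < clk2)
				level = (curr - clk1) <= (clk2 - curr) ? i : i + 1;
		}
		return level;
	}

	int main(void)
	{
		unsigned int lv[] = { 500, 800, 1100 };

		/* Prints 2; without the exact-match branch it prints -1. */
		printf("%d\n", pick_level(lv, 3, 1100));
		return 0;
	}
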
@@ -1929,7 +1938,6 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
smu_i2c->port = i;
mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE;
- control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v13_0_6_i2c_algo;
snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
@@ -2067,67 +2075,70 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_4 *gpu_metrics =
- (struct gpu_metrics_v1_4 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_5 *gpu_metrics =
+ (struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table;
struct amdgpu_device *adev = smu->adev;
- int ret = 0, xcc_id, inst, i;
- MetricsTable_t *metrics;
+ int ret = 0, xcc_id, inst, i, j;
+ MetricsTableX_t *metrics_x;
+ MetricsTableA_t *metrics_a;
u16 link_width_level;
- metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
- ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
+ metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
if (ret) {
- kfree(metrics);
+ kfree(metrics_x);
return ret;
}
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 4);
+ metrics_a = (MetricsTableA_t *)metrics_x;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5);
gpu_metrics->temperature_hotspot =
- SMUQ10_ROUND(metrics->MaxSocketTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature));
/* Individual HBM stack temperature is not reported */
gpu_metrics->temperature_mem =
- SMUQ10_ROUND(metrics->MaxHbmTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature));
/* Reports max temperature of all voltage rails */
gpu_metrics->temperature_vrsoc =
- SMUQ10_ROUND(metrics->MaxVrTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature));
gpu_metrics->average_gfx_activity =
- SMUQ10_ROUND(metrics->SocketGfxBusy);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
gpu_metrics->average_umc_activity =
- SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
gpu_metrics->curr_socket_power =
- SMUQ10_ROUND(metrics->SocketPower);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower));
/* Energy counter reported in 15.259uJ (2^-16) units */
- gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
+ gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc);
for (i = 0; i < MAX_GFX_CLKS; i++) {
xcc_id = GET_INST(GC, i);
if (xcc_id >= 0)
gpu_metrics->current_gfxclk[i] =
- SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
if (i < MAX_CLKS) {
gpu_metrics->current_socclk[i] =
- SMUQ10_ROUND(metrics->SocclkFrequency[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]);
inst = GET_INST(VCN, i);
if (inst >= 0) {
gpu_metrics->current_vclk0[i] =
- SMUQ10_ROUND(metrics->VclkFrequency[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]);
gpu_metrics->current_dclk0[i] =
- SMUQ10_ROUND(metrics->DclkFrequency[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]);
}
}
}
- gpu_metrics->current_uclk = SMUQ10_ROUND(metrics->UclkFrequency);
+ gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
/* Throttle status is not reported through metrics now */
gpu_metrics->throttle_status = 0;
/* Clock Lock Status. Each bit corresponds to each GFXCLK instance */
- gpu_metrics->gfxclk_lock_status = metrics->GfxLockXCDMak >> GET_INST(GC, 0);
+ gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0);
if (!(adev->flags & AMD_IS_APU)) {
link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
@@ -2139,38 +2150,57 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->pcie_link_speed =
smu_v13_0_6_get_current_pcie_link_speed(smu);
gpu_metrics->pcie_bandwidth_acc =
- SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]);
+ SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]);
gpu_metrics->pcie_bandwidth_inst =
- SMUQ10_ROUND(metrics->PcieBandwidth[0]);
+ SMUQ10_ROUND(metrics_x->PcieBandwidth[0]);
gpu_metrics->pcie_l0_to_recov_count_acc =
- metrics->PCIeL0ToRecoveryCountAcc;
+ metrics_x->PCIeL0ToRecoveryCountAcc;
gpu_metrics->pcie_replay_count_acc =
- metrics->PCIenReplayAAcc;
+ metrics_x->PCIenReplayAAcc;
gpu_metrics->pcie_replay_rover_count_acc =
- metrics->PCIenReplayARolloverCountAcc;
+ metrics_x->PCIenReplayARolloverCountAcc;
+ gpu_metrics->pcie_nak_sent_count_acc =
+ metrics_x->PCIeNAKSentCountAcc;
+ gpu_metrics->pcie_nak_rcvd_count_acc =
+ metrics_x->PCIeNAKReceivedCountAcc;
}
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
gpu_metrics->gfx_activity_acc =
- SMUQ10_ROUND(metrics->SocketGfxBusyAcc);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc));
gpu_metrics->mem_activity_acc =
- SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc));
for (i = 0; i < NUM_XGMI_LINKS; i++) {
gpu_metrics->xgmi_read_data_acc[i] =
- SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]);
gpu_metrics->xgmi_write_data_acc[i] =
- SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]);
+ }
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ inst = GET_INST(JPEG, i);
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy)
+ [(inst * adev->jpeg.num_jpeg_rings) + j]);
+ }
+ }
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ inst = GET_INST(VCN, i);
+ gpu_metrics->vcn_activity[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]);
}
- gpu_metrics->xgmi_link_width = SMUQ10_ROUND(metrics->XgmiWidth);
- gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(metrics->XgmiBitrate);
+ gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth));
+ gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate));
- gpu_metrics->firmware_timestamp = metrics->Timestamp;
+ gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp);
*table = (void *)gpu_metrics;
- kfree(metrics);
+ kfree(metrics_x);
return sizeof(*gpu_metrics);
}
@@ -2206,17 +2236,18 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
continue;
}
- if (ret) {
- dev_err(adev->dev,
- "failed to send mode2 message \tparam: 0x%08x error code %d\n",
- SMU_RESET_MODE_2, ret);
+ if (ret)
goto out;
- }
+
} while (ret == -ETIME && timeout);
out:
mutex_unlock(&smu->message_lock);
+ if (ret)
+ dev_err(adev->dev, "failed to send mode2 reset, error code %d\n",
+ ret);
+
return ret;
}
@@ -2524,9 +2555,9 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct
return 0;
}
- if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(status0))
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(adev, status0))
*count = 1;
- else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(status0))
+ else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(adev, status0))
*count = 1;
return 0;
@@ -2537,13 +2568,15 @@ static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, st
uint32_t *count)
{
u32 ext_error_code;
+ u32 err_cnt;
ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]);
+ err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]);
if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0)
- *count = 1;
+ *count = err_cnt;
else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6)
- *count = 1;
+ *count = err_cnt;
return 0;
}
@@ -2635,6 +2668,7 @@ static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct
static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
{
+ struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t errcode, instlo;
instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
@@ -2642,7 +2676,13 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd
if (instlo != 0x03b30400)
return false;
- errcode = REG_GET_FIELD(entry->regs[MCA_REG_IDX_STATUS], MCMP1_STATUST0, ErrorCode);
+ if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) {
+ errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
+ errcode &= 0xff;
+ } else {
+ errcode = REG_GET_FIELD(entry->regs[MCA_REG_IDX_STATUS], MCMP1_STATUST0, ErrorCode);
+ }
+
return mca_smu_check_error_code(adev, mca_ras, errcode);
}
@@ -2855,6 +2895,13 @@ static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
return ret;
}
+static ssize_t smu_v13_0_6_get_ecc_info(struct smu_context *smu,
+ void *table)
+{
+ /* Support ecc info by default */
+ return 0;
+}
+
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
@@ -2909,6 +2956,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.i2c_init = smu_v13_0_6_i2c_control_init,
.i2c_fini = smu_v13_0_6_i2c_control_fini,
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
+ .get_ecc_info = smu_v13_0_6_get_ecc_info,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index f5596f031d00..59606a19e3d2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -140,6 +140,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
+ MSG_MAP(EnableUCLKShadow, PPSMC_MSG_EnableUCLKShadow, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -222,6 +223,7 @@ static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(ACTIVITY_MONITOR_COEFF),
[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
TAB_MAP(OVERDRIVE),
+ TAB_MAP(WIFIBAND),
};
static struct cmn2asic_mapping smu_v13_0_7_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -512,6 +514,9 @@ static int smu_v13_0_7_tables_init(struct smu_context *smu)
AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_WIFIBAND,
+ sizeof(WifiBandEntryTable_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
if (!smu_table->metrics_table)
@@ -2535,6 +2540,11 @@ static int smu_v13_0_7_set_df_cstate(struct smu_context *smu,
NULL);
}
+static bool smu_v13_0_7_wbrf_support_check(struct smu_context *smu)
+{
+ return smu->smc_fw_version > 0x00524600;
+}
+
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -2594,8 +2604,6 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.baco_is_support = smu_v13_0_baco_is_support,
- .baco_get_state = smu_v13_0_baco_get_state,
- .baco_set_state = smu_v13_0_baco_set_state,
.baco_enter = smu_v13_0_baco_enter,
.baco_exit = smu_v13_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
@@ -2603,6 +2611,9 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.set_mp1_state = smu_v13_0_7_set_mp1_state,
.set_df_cstate = smu_v13_0_7_set_df_cstate,
.gpo_control = smu_v13_0_gpo_control,
+ .is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check,
+ .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
+ .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
};
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index d8f8ad0e7137..4894f7ee737b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -224,7 +224,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
if (smu->is_apu)
adev->pm.fw_version = smu_version;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(14, 0, 2):
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
break;
@@ -235,7 +235,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
break;
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
- adev->ip_versions[MP1_HWIP][0]);
+ amdgpu_ip_version(adev, MP1_HWIP, 0));
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV;
break;
}
@@ -733,7 +733,7 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0;
struct amdgpu_device *adev = smu->adev;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 0):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 03b38c3a9968..47fdbae4adfc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -246,11 +246,20 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
*value = 0;
break;
case METRICS_AVERAGE_UCLK:
- *value = 0;
+ *value = metrics->MemclkFrequency;
break;
case METRICS_AVERAGE_FCLK:
*value = metrics->FclkFrequency;
break;
+ case METRICS_AVERAGE_VPECLK:
+ *value = metrics->VpeclkFrequency;
+ break;
+ case METRICS_AVERAGE_IPUCLK:
+ *value = metrics->IpuclkFrequency;
+ break;
+ case METRICS_AVERAGE_MPIPUCLK:
+ *value = metrics->MpipuclkFrequency;
+ break;
case METRICS_AVERAGE_GFXACTIVITY:
*value = metrics->GfxActivity / 100;
break;
@@ -270,8 +279,26 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->SocTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
- case METRICS_THROTTLER_STATUS:
- *value = 0;
+ case METRICS_THROTTLER_RESIDENCY_PROCHOT:
+ *value = metrics->ThrottleResidency_PROCHOT;
+ break;
+ case METRICS_THROTTLER_RESIDENCY_SPL:
+ *value = metrics->ThrottleResidency_SPL;
+ break;
+ case METRICS_THROTTLER_RESIDENCY_FPPT:
+ *value = metrics->ThrottleResidency_FPPT;
+ break;
+ case METRICS_THROTTLER_RESIDENCY_SPPT:
+ *value = metrics->ThrottleResidency_SPPT;
+ break;
+ case METRICS_THROTTLER_RESIDENCY_THM_CORE:
+ *value = metrics->ThrottleResidency_THM_CORE;
+ break;
+ case METRICS_THROTTLER_RESIDENCY_THM_GFX:
+ *value = metrics->ThrottleResidency_THM_GFX;
+ break;
+ case METRICS_THROTTLER_RESIDENCY_THM_SOC:
+ *value = metrics->ThrottleResidency_THM_SOC;
break;
case METRICS_VOLTAGE_VDDGFX:
*value = 0;
@@ -498,6 +525,8 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
sizeof(uint16_t) * 16);
gpu_metrics->average_dram_reads = metrics.DRAMReads;
gpu_metrics->average_dram_writes = metrics.DRAMWrites;
+ gpu_metrics->average_ipu_reads = metrics.IpuReads;
+ gpu_metrics->average_ipu_writes = metrics.IpuWrites;
gpu_metrics->average_socket_power = metrics.SocketPower;
gpu_metrics->average_ipu_power = metrics.IpuPower;
@@ -505,6 +534,7 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_power = metrics.GfxPower;
gpu_metrics->average_dgpu_power = metrics.dGpuPower;
gpu_metrics->average_all_core_power = metrics.AllCorePower;
+ gpu_metrics->average_sys_power = metrics.Psys;
memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
sizeof(uint16_t) * 16);
@@ -515,6 +545,8 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_fclk_frequency = metrics.FclkFrequency;
gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
gpu_metrics->average_ipuclk_frequency = metrics.IpuclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
+ gpu_metrics->average_mpipu_frequency = metrics.MpipuclkFrequency;
memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],
@@ -522,6 +554,14 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_core_maxfreq = metrics.InfrastructureCpuMaxFreq;
gpu_metrics->current_gfx_maxfreq = metrics.InfrastructureGfxMaxFreq;
+ gpu_metrics->throttle_residency_prochot = metrics.ThrottleResidency_PROCHOT;
+ gpu_metrics->throttle_residency_spl = metrics.ThrottleResidency_SPL;
+ gpu_metrics->throttle_residency_fppt = metrics.ThrottleResidency_FPPT;
+ gpu_metrics->throttle_residency_sppt = metrics.ThrottleResidency_SPPT;
+ gpu_metrics->throttle_residency_thm_core = metrics.ThrottleResidency_THM_CORE;
+ gpu_metrics->throttle_residency_thm_gfx = metrics.ThrottleResidency_THM_GFX;
+ gpu_metrics->throttle_residency_thm_soc = metrics.ThrottleResidency_THM_SOC;
+
gpu_metrics->time_filter_alphavalue = metrics.FilterAlphaValue;
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
@@ -1045,6 +1085,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
0, NULL);
}
+static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ uint8_t idx;
+
+ /* Only the SOC and VPE clock information is copied, to provide the VPE DPM settings for use. */
+ for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) {
+ clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx] : 0;
+ clock_table->SocClocks[idx].Vol = 0;
+ }
+
+ for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) {
+ clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx] : 0;
+ clock_table->VPEClocks[idx].Vol = 0;
+ }
+ }
+
+ return 0;
+}
+
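
A plausible consumer of the new hook, sketched under the assumption that amdgpu_dpm_get_dpm_clock_table() routes here for this ASIC (that helper is the existing dpm wrapper, but this caller is hypothetical):

	static void demo_dump_vpe_levels(struct amdgpu_device *adev)
	{
		struct dpm_clocks clock_table = {};
		int idx;

		if (amdgpu_dpm_get_dpm_clock_table(adev, &clock_table))
			return;

		/* Disabled levels were zeroed by smu_14_0_0_get_dpm_table(). */
		for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++)
			if (clock_table.VPEClocks[idx].Freq)
				dev_info(adev->dev, "VPE DPM%d: %u\n", idx,
					 clock_table.VPEClocks[idx].Freq);
	}
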
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@@ -1075,6 +1134,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
+ .get_dpm_clock_table = smu_14_0_0_get_dpm_table,
};
static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 001a5cf09657..00cd615bbcdc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -989,6 +989,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(1, 4):
structure_size = sizeof(struct gpu_metrics_v1_4);
break;
+ case METRICS_VERSION(1, 5):
+ structure_size = sizeof(struct gpu_metrics_v1_5);
+ break;
case METRICS_VERSION(2, 0):
structure_size = sizeof(struct gpu_metrics_v2_0);
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 64766ac69c53..6f4d212607d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -98,6 +98,9 @@
#define smu_set_config_table(smu, config_table) smu_ppt_funcs(set_config_table, -EOPNOTSUPP, smu, config_table)
#define smu_init_pptable_microcode(smu) smu_ppt_funcs(init_pptable_microcode, 0, smu)
#define smu_notify_rlc_state(smu, en) smu_ppt_funcs(notify_rlc_state, 0, smu, en)
+#define smu_is_asic_wbrf_supported(smu) smu_ppt_funcs(is_asic_wbrf_supported, false, smu)
+#define smu_enable_uclk_shadow(smu, enable) smu_ppt_funcs(enable_uclk_shadow, 0, smu, enable)
+#define smu_set_wbrf_exclusion_ranges(smu, freq_band_range) smu_ppt_funcs(set_wbrf_exclusion_ranges, -EOPNOTSUPP, smu, freq_band_range)
#endif
#endif
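
These three wrappers follow smu_internal.h's smu_ppt_funcs(intf, default, ...) convention: when an ASIC's pptable_funcs leaves the hook NULL, the call collapses to the listed default (false, 0, and -EOPNOTSUPP respectively) rather than dereferencing a NULL function pointer. The real macro is defined earlier in this header; its shape is roughly (illustrative restatement, not a quote):

	#define smu_ppt_funcs(intf, ret, smu, args...) \
		((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? \
			(smu)->ppt_funcs->intf((smu), ##args) : (ret)) : -EINVAL)
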
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index dc01c43f6193..d72c22dcf685 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -221,7 +221,7 @@ static int malidp_crtc_atomic_check_ctm(struct drm_crtc *crtc,
/*
* The size of the ctm is checked in
- * drm_atomic_replace_property_blob_from_id.
+ * drm_property_replace_blob_from_id.
*/
ctm = (struct drm_color_ctm *)state->ctm->data;
for (i = 0; i < ARRAY_SIZE(ctm->matrix); ++i) {
diff --git a/drivers/gpu/drm/ast/ast_i2c.c b/drivers/gpu/drm/ast/ast_i2c.c
index 0e845e7acd9b..e5d3f7121de4 100644
--- a/drivers/gpu/drm/ast/ast_i2c.c
+++ b/drivers/gpu/drm/ast/ast_i2c.c
@@ -120,7 +120,6 @@ struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
return NULL;
i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.class = I2C_CLASS_DDC;
i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 19d2dc05c397..efd996f6c138 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -330,6 +330,7 @@ config DRM_TOSHIBA_TC358768
select REGMAP_I2C
select DRM_PANEL
select DRM_MIPI_DSI
+ select VIDEOMODE_HELPERS
help
Toshiba TC358768AXBG/TC358778XBG DSI bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index 49d7c2ab1ecc..b29980f95379 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -6,6 +6,7 @@
*/
#include <linux/auxiliary_bus.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <drm/drm_bridge.h>
#include <drm/bridge/aux-bridge.h>
@@ -57,7 +58,7 @@ int drm_aux_bridge_register(struct device *parent)
adev->id = ret;
adev->name = "aux_bridge";
adev->dev.parent = parent;
- adev->dev.of_node = parent->of_node;
+ adev->dev.of_node = of_node_get(parent->of_node);
adev->dev.release = drm_aux_bridge_release;
ret = auxiliary_device_init(adev);
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index 5d2ab3a715f9..bb55f697a181 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -6,7 +6,7 @@
*/
#include <linux/auxiliary_bus.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <drm/drm_bridge.h>
#include <drm/bridge/aux-bridge.h>
@@ -68,9 +68,9 @@ struct device *drm_dp_hpd_bridge_register(struct device *parent,
adev->id = ret;
adev->name = "dp_hpd_bridge";
adev->dev.parent = parent;
- adev->dev.of_node = parent->of_node;
+ adev->dev.of_node = of_node_get(parent->of_node);
adev->dev.release = drm_aux_hpd_bridge_release;
- adev->dev.platform_data = np;
+ adev->dev.platform_data = of_node_get(np);
ret = auxiliary_device_init(adev);
if (ret) {
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index e48823a4f1ed..7f41525f7a6e 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -4,8 +4,6 @@
* Copyright (C) 2017 Broadcom
*/
-#include <linux/device.h>
-
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
@@ -21,7 +19,6 @@ struct panel_bridge {
struct drm_bridge bridge;
struct drm_connector connector;
struct drm_panel *panel;
- struct device_link *link;
u32 connector_type;
};
@@ -63,24 +60,13 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
- struct drm_panel *panel = panel_bridge->panel;
- struct drm_device *drm_dev = bridge->dev;
int ret;
- panel_bridge->link = device_link_add(drm_dev->dev, panel->dev,
- DL_FLAG_STATELESS);
- if (!panel_bridge->link) {
- DRM_ERROR("Failed to add device link between %s and %s\n",
- dev_name(drm_dev->dev), dev_name(panel->dev));
- return -EINVAL;
- }
-
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
if (!bridge->encoder) {
DRM_ERROR("Missing encoder\n");
- device_link_del(panel_bridge->link);
return -ENODEV;
}
@@ -92,7 +78,6 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
panel_bridge->connector_type);
if (ret) {
DRM_ERROR("Failed to initialize connector\n");
- device_link_del(panel_bridge->link);
return ret;
}
@@ -115,8 +100,6 @@ static void panel_bridge_detach(struct drm_bridge *bridge)
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
- device_link_del(panel_bridge->link);
-
/*
* Cleanup the connector if we know it was initialized.
*
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 8161b1a1a4b1..541e4f5afc4c 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -210,7 +210,7 @@ static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux,
struct ps8640 *ps_bridge = aux_to_ps8640(aux);
struct regmap *map = ps_bridge->regmap[PAGE0_DP_CNTL];
struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
- unsigned int len = msg->size;
+ size_t len = msg->size;
unsigned int data;
unsigned int base;
int ret;
@@ -330,11 +330,12 @@ static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux,
return ret;
}
- buf[i] = data;
+ if (i < msg->size)
+ buf[i] = data;
}
}
- return len;
+ return min(len, msg->size);
}
static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
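The two AUX-transfer fixes in this series (here and in ti-sn65dsi86 below) share one hardening pattern: the sink may report a reply length larger than the caller's buffer, so both the byte writes and the returned count are clamped to msg->size. A minimal sketch of the pattern, with hypothetical hw_reply_length()/hw_read_byte() helpers standing in for the device-specific register reads:

/* Sketch only: hw_reply_length() and hw_read_byte() are hypothetical
 * stand-ins for the device-specific register accesses.
 */
static ssize_t aux_read_clamped(void *hw, struct drm_dp_aux_msg *msg)
{
	u8 *buf = msg->buffer;
	size_t hw_len = hw_reply_length(hw);	/* length claimed by the sink */
	size_t len = min(hw_len, msg->size);	/* never trust it past the buffer */
	size_t i;

	for (i = 0; i < len; i++)
		buf[i] = hw_read_byte(hw, i);	/* writes stay inside msg->buffer */

	return len;	/* report only what was actually copied */
}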
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 52d91a0df85e..aca5bb0866f8 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -515,7 +515,6 @@ static struct i2c_adapter *dw_hdmi_i2c_adapter(struct dw_hdmi *hdmi)
init_completion(&i2c->cmp);
adap = &i2c->adap;
- adap->class = I2C_CLASS_DDC;
adap->owner = THIS_MODULE;
adap->dev.parent = hdmi->dev;
adap->algo = &dw_hdmi_algorithm;
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 5b8e1dfc458d..62cc3893dca5 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -527,6 +527,7 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
u32 request_val = AUX_CMD_REQ(msg->request);
u8 *buf = msg->buffer;
unsigned int len = msg->size;
+ unsigned int short_len;
unsigned int val;
int ret;
u8 addr_len[SN_AUX_LENGTH_REG + 1 - SN_AUX_ADDR_19_16_REG];
@@ -600,7 +601,8 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
}
if (val & AUX_IRQ_STATUS_AUX_SHORT) {
- ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &len);
+ ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &short_len);
+ len = min(len, short_len);
if (ret)
goto exit;
} else if (val & AUX_IRQ_STATUS_NAT_I2C_FAIL) {
@@ -1413,7 +1415,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
int ret;
if (!pdata->pwm_enabled) {
- ret = pm_runtime_resume_and_get(pdata->dev);
+ ret = pm_runtime_resume_and_get(chip->dev);
if (ret < 0)
return ret;
}
@@ -1429,7 +1431,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
SN_GPIO_MUX_MASK << (2 * SN_PWM_GPIO_IDX),
SN_GPIO_MUX_SPECIAL << (2 * SN_PWM_GPIO_IDX));
if (ret) {
- dev_err(pdata->dev, "failed to mux in PWM function\n");
+ dev_err(chip->dev, "failed to mux in PWM function\n");
goto out;
}
}
@@ -1505,7 +1507,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ret = regmap_write(pdata->regmap, SN_PWM_PRE_DIV_REG, pre_div);
if (ret) {
- dev_err(pdata->dev, "failed to update PWM_PRE_DIV\n");
+ dev_err(chip->dev, "failed to update PWM_PRE_DIV\n");
goto out;
}
@@ -1517,7 +1519,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
FIELD_PREP(SN_PWM_INV_MASK, state->polarity == PWM_POLARITY_INVERSED);
ret = regmap_write(pdata->regmap, SN_PWM_EN_INV_REG, pwm_en_inv);
if (ret) {
- dev_err(pdata->dev, "failed to update PWM_EN/PWM_INV\n");
+ dev_err(chip->dev, "failed to update PWM_EN/PWM_INV\n");
goto out;
}
@@ -1525,7 +1527,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
out:
if (!pdata->pwm_enabled)
- pm_runtime_put_sync(pdata->dev);
+ pm_runtime_put_sync(chip->dev);
return ret;
}
@@ -1585,12 +1587,14 @@ static int ti_sn_pwm_probe(struct auxiliary_device *adev,
{
struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent);
- pdata->pchip.dev = pdata->dev;
+ pdata->pchip.dev = &adev->dev;
pdata->pchip.ops = &ti_sn_pwm_ops;
pdata->pchip.npwm = 1;
pdata->pchip.of_xlate = of_pwm_single_xlate;
pdata->pchip.of_pwm_n_cells = 1;
+ devm_pm_runtime_enable(&adev->dev);
+
return pwmchip_add(&pdata->pchip);
}
@@ -1601,7 +1605,7 @@ static void ti_sn_pwm_remove(struct auxiliary_device *adev)
pwmchip_remove(&pdata->pchip);
if (pdata->pwm_enabled)
- pm_runtime_put_sync(pdata->dev);
+ pm_runtime_put_sync(&adev->dev);
}
static const struct auxiliary_device_id ti_sn_pwm_id_table[] = {
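The PWM hunks above consistently swap pdata->dev for chip->dev: once ti_sn_pwm_probe() points pchip.dev at the auxiliary device and enables runtime PM on it, every get/put in the PWM ops must target that same device. A condensed sketch of the resulting pairing (hypothetical driver, not the ti-sn65dsi86 code):

/* Hypothetical PWM op illustrating the chip->dev runtime-PM pairing. */
static int my_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			const struct pwm_state *state)
{
	int ret;

	ret = pm_runtime_resume_and_get(chip->dev);	/* the auxiliary device */
	if (ret < 0)
		return ret;

	/* ... program duty cycle, period, polarity ... */

	pm_runtime_put_sync(chip->dev);	/* balanced against the same device */
	return 0;
}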
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
index b4f653417883..8dbce9919a57 100644
--- a/drivers/gpu/drm/ci/arm64.config
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -186,6 +186,7 @@ CONFIG_HW_RANDOM_MTK=y
CONFIG_MTK_DEVAPC=y
CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y
+CONFIG_REGULATOR_DA9211=y
# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
CONFIG_ARCH_TEGRA=y
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index e5c5dcedd108..f73f3471e94e 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -19,7 +19,7 @@ if [[ "$KERNEL_ARCH" = "arm64" ]]; then
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dtb"
- DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8016-sbc.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8016-sbc-usb-host.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8096-db820c.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtb"
@@ -58,6 +58,9 @@ git config --global user.email "fdo@example.com"
git config --global user.name "freedesktop.org CI"
git config --global pull.rebase true
+# cleanup git state on the worker
+rm -rf .git/rebase-merge
+
# Try to merge fixes from target repo
if [ "$(git ls-remote --exit-code --heads ${UPSTREAM_REPO} ${TARGET_BRANCH}-external-fixes)" ]; then
git pull ${UPSTREAM_REPO} ${TARGET_BRANCH}-external-fixes
@@ -75,19 +78,19 @@ else
fi
fi
-for opt in $ENABLE_KCONFIGS; do
- echo CONFIG_$opt=y >> drivers/gpu/drm/ci/${KERNEL_ARCH}.config
-done
-for opt in $DISABLE_KCONFIGS; do
- echo CONFIG_$opt=n >> drivers/gpu/drm/ci/${KERNEL_ARCH}.config
-done
-
if [[ -n "${MERGE_FRAGMENT}" ]]; then
./scripts/kconfig/merge_config.sh ${DEFCONFIG} drivers/gpu/drm/ci/${MERGE_FRAGMENT}
else
make `basename ${DEFCONFIG}`
fi
+for opt in $ENABLE_KCONFIGS; do
+ ./scripts/config --enable CONFIG_$opt
+done
+for opt in $DISABLE_KCONFIGS; do
+ ./scripts/config --disable CONFIG_$opt
+done
+
make ${KERNEL_IMAGE_NAME}
mkdir -p /lava-files/
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index aeb9bab1b069..dac92cc2777c 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -5,7 +5,7 @@ variables:
UPSTREAM_REPO: git://anongit.freedesktop.org/drm/drm
TARGET_BRANCH: drm-next
- IGT_VERSION: d1db7333d9c5fbbb05e50b0804123950d9dc1c46
+ IGT_VERSION: d2af13d9f5be5ce23d996e4afd3e45990f5ab977
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/anholt/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.15.0
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index 2f815ee3a8a3..f1a08b9b146f 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -15,15 +15,21 @@ cat /sys/kernel/debug/device_component/*
'
# Dump drm state to confirm that kernel was able to find a connected display:
-# TODO this path might not exist for all drivers.. maybe run modetest instead?
set +e
cat /sys/kernel/debug/dri/*/state
set -e
case "$DRIVER_NAME" in
- rockchip|mediatek|meson)
+ rockchip|meson)
export IGT_FORCE_DRIVER="panfrost"
;;
+ mediatek)
+ if [ "$GPU_VERSION" = "mt8173" ]; then
+ export IGT_FORCE_DRIVER=${DRIVER_NAME}
+ elif [ "$GPU_VERSION" = "mt8183" ]; then
+ export IGT_FORCE_DRIVER="panfrost"
+ fi
+ ;;
amdgpu)
# Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib
mv /install/modules/lib/modules/* /lib/modules/.
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index f285ed67eb3d..2c9a1838e728 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -102,15 +102,12 @@ msm:apq8016:
stage: msm
variables:
DRIVER_NAME: msm
- BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8016-sbc.dtb
+ BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8016-sbc-usb-host.dtb
GPU_VERSION: apq8016
BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
RUNNER_TAG: google-freedreno-db410c
script:
- ./install/bare-metal/fastboot.sh
- rules:
- # TODO: current issue: it is not fiding the NFS root. Fix and remove this rule.
- - when: never
msm:apq8096:
extends:
@@ -280,9 +277,6 @@ mediatek:mt8173:
DEVICE_TYPE: mt8173-elm-hana
GPU_VERSION: mt8173
RUNNER_TAG: mesa-ci-x86-64-lava-mt8173-elm-hana
- rules:
- # TODO: current issue: device is hanging. Fix and remove this rule.
- - when: never
mediatek:mt8183:
extends:
@@ -335,11 +329,10 @@ virtio_gpu:none:
script:
- ln -sf $CI_PROJECT_DIR/install /install
- mv install/bzImage /lava-files/bzImage
+ - mkdir -p $CI_PROJECT_DIR/results
+ - ln -sf $CI_PROJECT_DIR/results /results
- install/crosvm-runner.sh install/igt_runner.sh
needs:
- debian/x86_64_test-gl
- testing:x86_64
- igt:x86_64
- rules:
- # TODO: current issue: malloc(): corrupted top size. Fix and remove this rule.
- - when: never
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
index 671916067dba..ef0cb7c3698c 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -1,5 +1,4 @@
kms_3d,Fail
-kms_addfb_basic@addfb25-bad-modifier,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
kms_bw@linear-tiling-1-displays-2560x1440p,Fail
kms_bw@linear-tiling-1-displays-3840x2160p,Fail
@@ -9,13 +8,19 @@ kms_bw@linear-tiling-2-displays-3840x2160p,Fail
kms_bw@linear-tiling-3-displays-1920x1080p,Fail
kms_bw@linear-tiling-3-displays-2560x1440p,Fail
kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_color@invalid-gamma-lut-sizes,Fail
kms_color@pipe-A-invalid-gamma-lut-sizes,Fail
kms_color@pipe-B-invalid-gamma-lut-sizes,Fail
-kms_force_connector_basic@force-connector-state,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning-vs-hang,Fail
+kms_flip@flip-vs-suspend,Fail
+kms_flip@flip-vs-suspend-interruptible,Fail
kms_force_connector_basic@force-edid,Fail
kms_force_connector_basic@force-load-detect,Fail
kms_force_connector_basic@prune-stale-modes,Fail
-kms_invalid_mode@int-max-clock,Fail
+kms_hdmi_inject@inject-4k,Fail
kms_plane_scaling@planes-upscale-20x20,Fail
kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25,Fail
kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-5,Fail
@@ -27,3 +32,5 @@ kms_properties@get_properties-sanity-atomic,Fail
kms_properties@plane-properties-atomic,Fail
kms_properties@plane-properties-legacy,Fail
kms_rmfb@close-fd,Fail
+kms_selftest@drm_format,Timeout
+kms_selftest@drm_format_helper,Timeout
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index 9981682feab2..d39d254c935e 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -6,10 +6,15 @@ kms_cursor_legacy@all-pipes-single-bo,Fail
kms_cursor_legacy@all-pipes-single-move,Fail
kms_cursor_legacy@all-pipes-torture-bo,Fail
kms_cursor_legacy@all-pipes-torture-move,Fail
+kms_cursor_legacy@forked-bo,Fail
+kms_cursor_legacy@forked-move,Fail
kms_cursor_legacy@pipe-A-forked-bo,Fail
kms_cursor_legacy@pipe-A-forked-move,Fail
kms_cursor_legacy@pipe-A-single-bo,Fail
kms_cursor_legacy@pipe-A-single-move,Fail
kms_cursor_legacy@pipe-A-torture-bo,Fail
kms_cursor_legacy@pipe-A-torture-move,Fail
+kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
+kms_selftest@drm_format,Timeout
+kms_selftest@drm_format_helper,Timeout
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
index 9586b2339f6f..007f21e56d89 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
@@ -10,6 +10,49 @@ kms_bw@linear-tiling-1-displays-3840x2160p,Fail
kms_bw@linear-tiling-2-displays-1920x1080p,Fail
kms_bw@linear-tiling-2-displays-2560x1440p,Fail
kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_bw@linear-tiling-4-displays-1920x1080p,Fail
+kms_bw@linear-tiling-4-displays-2560x1440p,Fail
+kms_bw@linear-tiling-4-displays-3840x2160p,Fail
+kms_bw@linear-tiling-5-displays-1920x1080p,Fail
+kms_bw@linear-tiling-5-displays-2560x1440p,Fail
+kms_bw@linear-tiling-5-displays-3840x2160p,Fail
+kms_bw@linear-tiling-6-displays-1920x1080p,Fail
+kms_bw@linear-tiling-6-displays-2560x1440p,Fail
+kms_bw@linear-tiling-6-displays-3840x2160p,Fail
+kms_bw@linear-tiling-7-displays-1920x1080p,Fail
+kms_bw@linear-tiling-7-displays-2560x1440p,Fail
+kms_bw@linear-tiling-7-displays-3840x2160p,Fail
+kms_bw@linear-tiling-8-displays-1920x1080p,Fail
+kms_bw@linear-tiling-8-displays-2560x1440p,Fail
+kms_bw@linear-tiling-8-displays-3840x2160p,Fail
+kms_flip@absolute-wf_vblank,Fail
+kms_flip@absolute-wf_vblank-interruptible,Fail
+kms_flip@basic-flip-vs-wf_vblank,Fail
+kms_flip@blocking-absolute-wf_vblank,Fail
+kms_flip@blocking-absolute-wf_vblank-interruptible,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@busy-flip,Fail
+kms_flip@dpms-vs-vblank-race,Fail
+kms_flip@dpms-vs-vblank-race-interruptible,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-expired-vblank,Fail
+kms_flip@flip-vs-expired-vblank-interruptible,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning-vs-hang,Fail
+kms_flip@flip-vs-wf_vblank-interruptible,Fail
+kms_flip@modeset-vs-vblank-race,Fail
+kms_flip@modeset-vs-vblank-race-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_flip@wf_vblank-ts-check,Fail
+kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
kms_plane_scaling@downscale-with-modifier-factor-0-25,Fail
kms_plane_scaling@downscale-with-rotation-factor-0-25,Fail
@@ -22,6 +65,9 @@ kms_plane_scaling@upscale-with-modifier-factor-0-25,Fail
kms_plane_scaling@upscale-with-pixel-format-20x20,Fail
kms_plane_scaling@upscale-with-pixel-format-factor-0-25,Fail
kms_plane_scaling@upscale-with-rotation-20x20,Fail
+kms_selftest@drm_format,Timeout
+kms_selftest@drm_format_helper,Timeout
+kms_setmode@basic,Fail
kms_vblank@crtc-id,Fail
kms_vblank@invalid,Fail
kms_vblank@pipe-A-accuracy-idle,Fail
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index d72b6f9a352c..b1ca3a1100da 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -2102,7 +2102,6 @@ int drm_dp_aux_register(struct drm_dp_aux *aux)
if (!aux->ddc.algo)
drm_dp_aux_init(aux);
- aux->ddc.class = I2C_CLASS_DDC;
aux->ddc.owner = THIS_MODULE;
aux->ddc.dev.parent = aux->dev;
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 8ca01a6bf645..bd6c24d4213c 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -5926,7 +5926,6 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
aux->ddc.algo_data = aux;
aux->ddc.retries = 3;
- aux->ddc.class = I2C_CLASS_DDC;
aux->ddc.owner = THIS_MODULE;
/* FIXME: set the kdev of the port's connector as parent */
aux->ddc.dev.parent = parent_dev;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c31fc0b48c31..a91737adf8e7 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -733,6 +733,7 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
drm_get_color_encoding_name(state->color_encoding));
drm_printf(p, "\tcolor-range=%s\n",
drm_get_color_range_name(state->color_range));
+ drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
if (plane->funcs->atomic_print_state)
plane->funcs->atomic_print_state(p, state);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index c3f677130def..39ef0a6addeb 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -795,9 +795,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
/**
- * drm_atomic_helper_check_wb_encoder_state() - Check writeback encoder state
- * @encoder: encoder state to check
- * @conn_state: connector state to check
+ * drm_atomic_helper_check_wb_connector_state() - Check writeback connector state
+ * @connector: corresponding connector
+ * @state: the driver state object
*
* Checks if the writeback connector state is valid, and returns an error if it
* isn't.
@@ -806,9 +806,11 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
* Zero for success or -errno
*/
int
-drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder,
- struct drm_connector_state *conn_state)
+drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector,
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
struct drm_writeback_job *wb_job = conn_state->writeback_job;
struct drm_property_blob *pixel_format_blob;
struct drm_framebuffer *fb;
@@ -827,11 +829,11 @@ drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder,
if (fb->format->format == formats[i])
return 0;
- drm_dbg_kms(encoder->dev, "Invalid pixel format %p4cc\n", &fb->format->format);
+ drm_dbg_kms(connector->dev, "Invalid pixel format %p4cc\n", &fb->format->format);
return -EINVAL;
}
-EXPORT_SYMBOL(drm_atomic_helper_check_wb_encoder_state);
+EXPORT_SYMBOL(drm_atomic_helper_check_wb_connector_state);
/**
* drm_atomic_helper_check_plane_state() - Check plane state for validity
@@ -2012,7 +2014,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
return ret;
drm_atomic_helper_async_commit(dev, state);
- drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_helper_unprepare_planes(dev, state);
return 0;
}
@@ -2072,7 +2074,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
return 0;
err:
- drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_helper_unprepare_planes(dev, state);
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit);
@@ -2650,6 +2652,39 @@ fail_prepare_fb:
}
EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
+/**
+ * drm_atomic_helper_unprepare_planes - release plane resources on aborts
+ * @dev: DRM device
+ * @state: atomic state object with old state structures
+ *
+ * This function cleans up plane state, specifically framebuffers, from the
+ * atomic state. It undoes the effects of drm_atomic_helper_prepare_planes()
+ * when aborting an atomic commit. For cleaning up after a successful commit
+ * use drm_atomic_helper_cleanup_planes().
+ */
+void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ int i;
+
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs = plane->helper_private;
+
+ if (funcs->end_fb_access)
+ funcs->end_fb_access(plane, new_plane_state);
+ }
+
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs = plane->helper_private;
+
+ if (funcs->cleanup_fb)
+ funcs->cleanup_fb(plane, new_plane_state);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_unprepare_planes);
+
static bool plane_crtc_active(const struct drm_plane_state *state)
{
return state->crtc && state->crtc->state->active;
@@ -2784,6 +2819,17 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
funcs->atomic_flush(crtc, old_state);
}
+
+ /*
+ * Signal end of framebuffer access here before hw_done. After hw_done,
+ * a later commit might have already released the plane state.
+ */
+ for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs = plane->helper_private;
+
+ if (funcs->end_fb_access)
+ funcs->end_fb_access(plane, old_plane_state);
+ }
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
@@ -2911,40 +2957,22 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
* configuration. Hence the old configuration must be preserved in @old_state to
* be able to call this function.
*
- * This function must also be called on the new state when the atomic update
- * fails at any point after calling drm_atomic_helper_prepare_planes().
+ * This function must not be called on the new state when the atomic update
+ * fails at any point after calling drm_atomic_helper_prepare_planes(). Use
+ * drm_atomic_helper_unprepare_planes() in this case.
*/
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_plane *plane;
- struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *old_plane_state;
int i;
- for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
+ for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
- if (funcs->end_fb_access)
- funcs->end_fb_access(plane, new_plane_state);
- }
-
- for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
- const struct drm_plane_helper_funcs *funcs;
- struct drm_plane_state *plane_state;
-
- /*
- * This might be called before swapping when commit is aborted,
- * in which case we have to cleanup the new state.
- */
- if (old_plane_state == plane->state)
- plane_state = new_plane_state;
- else
- plane_state = old_plane_state;
-
- funcs = plane->helper_private;
-
if (funcs->cleanup_fb)
- funcs->cleanup_fb(plane, plane_state);
+ funcs->cleanup_fb(plane, old_plane_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
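With drm_atomic_helper_unprepare_planes() added, the three plane helpers pair up as follows: prepare on entry, unprepare on an aborted commit, cleanup (on the old state) after a successful one. A condensed sketch of a driver commit path under that contract, not a complete implementation:

static int my_commit(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_swap_state(state, true);
	if (ret) {
		/* aborted before the swap: undo prepare_planes() */
		drm_atomic_helper_unprepare_planes(dev, state);
		return ret;
	}

	/* ... commit tail runs; afterwards, clean up the old state ... */
	drm_atomic_helper_cleanup_planes(dev, state);
	return 0;
}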
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 54975de44a0e..519228eb1095 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -352,6 +352,7 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
state->fence = NULL;
state->commit = NULL;
state->fb_damage_clips = NULL;
+ state->color_mgmt_changed = false;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index aee4a65d4959..29d4940188d4 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -362,48 +362,6 @@ static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
return fence_ptr;
}
-static int
-drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
- struct drm_property_blob **blob,
- uint64_t blob_id,
- ssize_t expected_size,
- ssize_t expected_elem_size,
- bool *replaced)
-{
- struct drm_property_blob *new_blob = NULL;
-
- if (blob_id != 0) {
- new_blob = drm_property_lookup_blob(dev, blob_id);
- if (new_blob == NULL) {
- drm_dbg_atomic(dev,
- "cannot find blob ID %llu\n", blob_id);
- return -EINVAL;
- }
-
- if (expected_size > 0 &&
- new_blob->length != expected_size) {
- drm_dbg_atomic(dev,
- "[BLOB:%d] length %zu different from expected %zu\n",
- new_blob->base.id, new_blob->length, expected_size);
- drm_property_blob_put(new_blob);
- return -EINVAL;
- }
- if (expected_elem_size > 0 &&
- new_blob->length % expected_elem_size != 0) {
- drm_dbg_atomic(dev,
- "[BLOB:%d] length %zu not divisible by element size %zu\n",
- new_blob->base.id, new_blob->length, expected_elem_size);
- drm_property_blob_put(new_blob);
- return -EINVAL;
- }
- }
-
- *replaced |= drm_property_replace_blob(blob, new_blob);
- drm_property_blob_put(new_blob);
-
- return 0;
-}
-
static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *state, struct drm_property *property,
uint64_t val)
@@ -424,7 +382,7 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
} else if (property == config->prop_vrr_enabled) {
state->vrr_enabled = val;
} else if (property == config->degamma_lut_property) {
- ret = drm_atomic_replace_property_blob_from_id(dev,
+ ret = drm_property_replace_blob_from_id(dev,
&state->degamma_lut,
val,
-1, sizeof(struct drm_color_lut),
@@ -432,7 +390,7 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->ctm_property) {
- ret = drm_atomic_replace_property_blob_from_id(dev,
+ ret = drm_property_replace_blob_from_id(dev,
&state->ctm,
val,
sizeof(struct drm_color_ctm), -1,
@@ -440,7 +398,7 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->gamma_lut_property) {
- ret = drm_atomic_replace_property_blob_from_id(dev,
+ ret = drm_property_replace_blob_from_id(dev,
&state->gamma_lut,
val,
-1, sizeof(struct drm_color_lut),
@@ -581,7 +539,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
} else if (property == plane->color_range_property) {
state->color_range = val;
} else if (property == config->prop_fb_damage_clips) {
- ret = drm_atomic_replace_property_blob_from_id(dev,
+ ret = drm_property_replace_blob_from_id(dev,
&state->fb_damage_clips,
val,
-1,
@@ -778,7 +736,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
if (state->link_status != DRM_LINK_STATUS_GOOD)
state->link_status = val;
} else if (property == config->hdr_output_metadata_property) {
- ret = drm_atomic_replace_property_blob_from_id(dev,
+ ret = drm_property_replace_blob_from_id(dev,
&state->hdr_output_metadata,
val,
sizeof(struct hdr_output_metadata), -1,
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 252c105d614f..22aa015df387 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -234,7 +234,7 @@ static int
drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv)
{
if (file_priv->was_master &&
- rcu_access_pointer(file_priv->pid) == task_pid(current))
+ rcu_access_pointer(file_priv->pid) == task_tgid(current))
return 0;
if (!capable(CAP_SYS_ADMIN))
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index df9bf3c9206e..cb90e70d85e8 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -715,8 +715,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
struct drm_modeset_acquire_ctx ctx;
- int ret;
- int i;
+ int ret, i, num_connectors = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
@@ -871,6 +870,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
connector->name);
connector_set[i] = connector;
+ num_connectors++;
}
}
@@ -879,7 +879,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
set.y = crtc_req->y;
set.mode = mode;
set.connectors = connector_set;
- set.num_connectors = crtc_req->count_connectors;
+ set.num_connectors = num_connectors;
set.fb = fb;
if (drm_drv_uses_atomic_modeset(dev))
@@ -892,7 +892,7 @@ out:
drm_framebuffer_put(fb);
if (connector_set) {
- for (i = 0; i < crtc_req->count_connectors; i++) {
+ for (i = 0; i < num_connectors; i++) {
if (connector_set[i])
drm_connector_put(connector_set[i]);
}
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 02e7481758c0..f4715a67e340 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -638,7 +638,7 @@ void drm_debugfs_encoder_add(struct drm_encoder *encoder)
debugfs_create_file("bridges", 0444, root, encoder,
&bridges_fops);
- if (encoder->funcs->debugfs_init)
+ if (encoder->funcs && encoder->funcs->debugfs_init)
encoder->funcs->debugfs_init(encoder, root);
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index cb4031d5dcbb..69c68804023f 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2311,7 +2311,8 @@ int drm_edid_override_connector_update(struct drm_connector *connector)
override = drm_edid_override_get(connector);
if (override) {
- num_modes = drm_edid_connector_update(connector, override);
+ if (drm_edid_connector_update(connector, override) == 0)
+ num_modes = drm_edid_connector_add_modes(connector);
drm_edid_free(override);
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
index 5d2809de4517..48ee851b61d9 100644
--- a/drivers/gpu/drm/drm_exec.c
+++ b/drivers/gpu/drm/drm_exec.c
@@ -69,16 +69,23 @@ static void drm_exec_unlock_all(struct drm_exec *exec)
* drm_exec_init - initialize a drm_exec object
* @exec: the drm_exec object to initialize
* @flags: controls locking behavior, see DRM_EXEC_* defines
+ * @nr: the initial number of objects
*
* Initialize the object and make sure that we can track locked objects.
+ *
+ * If nr is non-zero then it is used as the initial objects table size.
+ * In either case, the table will grow (be re-allocated) on demand.
*/
-void drm_exec_init(struct drm_exec *exec, uint32_t flags)
+void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr)
{
+ if (!nr)
+ nr = PAGE_SIZE / sizeof(void *);
+
exec->flags = flags;
- exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ exec->objects = kvmalloc_array(nr, sizeof(void *), GFP_KERNEL);
/* If allocation here fails, just delay that till the first use */
- exec->max_objects = exec->objects ? PAGE_SIZE / sizeof(void *) : 0;
+ exec->max_objects = exec->objects ? nr : 0;
exec->num_objects = 0;
exec->contended = DRM_EXEC_DUMMY;
exec->prelocked = NULL;
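With the new size hint, a caller that knows its object count up front can avoid the on-demand reallocation entirely; passing 0 keeps the old one-page default. A minimal sketch of the updated calling convention:

/* Sketch: lock 'count' GEM objects, pre-sizing the drm_exec table. */
static int lock_objs(struct drm_gem_object **objs, unsigned int count)
{
	struct drm_exec exec;
	unsigned int i;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, count);
	drm_exec_until_all_locked(&exec) {
		for (i = 0; i < count; i++) {
			ret = drm_exec_lock_obj(&exec, objs[i]);
			drm_exec_retry_on_contention(&exec);
			if (ret)
				goto out;
		}
	}
out:
	drm_exec_fini(&exec);
	return ret;
}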
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 3cc0ffc28e86..888aadb6a4ac 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -461,6 +461,7 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
INIT_LIST_HEAD(&arg.fbs);
+ drm_WARN_ON(dev, !list_empty(&fb->filp_head));
list_add_tail(&fb->filp_head, &arg.fbs);
schedule_work(&arg.work);
@@ -827,6 +828,8 @@ void drm_framebuffer_free(struct kref *kref)
container_of(kref, struct drm_framebuffer, base.refcount);
struct drm_device *dev = fb->dev;
+ drm_WARN_ON(dev, !list_empty(&fb->filp_head));
+
/*
* The lookup idr holds a weak reference, which has not necessarily been
* removed at this point. Check for that.
@@ -1119,7 +1122,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
dev = fb->dev;
- WARN_ON(!list_empty(&fb->filp_head));
+ drm_WARN_ON(dev, !list_empty(&fb->filp_head));
/*
* drm ABI mandates that we remove any deleted framebuffers from active
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 9c0922c1a5a2..f9eb56f24bef 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Copyright (c) 2022 Red Hat.
*
@@ -1250,7 +1250,7 @@ drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec)
unsigned int num_fences = vm_exec->num_fences;
int ret;
- drm_exec_init(exec, vm_exec->flags);
+ drm_exec_init(exec, vm_exec->flags, 0);
drm_exec_until_all_locked(exec) {
ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences);
@@ -1341,7 +1341,7 @@ drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
struct drm_exec *exec = &vm_exec->exec;
int ret;
- drm_exec_init(exec, vm_exec->flags);
+ drm_exec_init(exec, vm_exec->flags, 0);
drm_exec_until_all_locked(exec) {
ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range,
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index ac9a406250c5..893f52ee4926 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -2617,8 +2617,7 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
break;
}
- strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
- out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+ strscpy_pad(out->name, in->name, sizeof(out->name));
}
/**
@@ -2659,8 +2658,7 @@ int drm_mode_convert_umode(struct drm_device *dev,
* useful for the kernel->userspace direction anyway.
*/
out->type = in->type & DRM_MODE_TYPE_ALL;
- strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
- out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+ strscpy_pad(out->name, in->name, sizeof(out->name));
/* Clearing picture aspect ratio bits from out flags,
* as the aspect-ratio information is not stored in
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 9e8e4c60983d..672c655c7a8e 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -1503,6 +1503,7 @@ retry:
out:
if (fb)
drm_framebuffer_put(fb);
+ fb = NULL;
if (plane->old_fb)
drm_framebuffer_put(plane->old_fb);
plane->old_fb = NULL;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 63b709a67471..834a5e28abbe 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -278,7 +278,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
-/*
+/**
* drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
* @dev: drm_device to import into
* @file_priv: drm file-private structure
@@ -292,9 +292,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
*
* Returns 0 on success or a negative error code on failure.
*/
-static int drm_gem_prime_fd_to_handle(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd,
- uint32_t *handle)
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd,
+ uint32_t *handle)
{
struct dma_buf *dma_buf;
struct drm_gem_object *obj;
@@ -360,6 +360,7 @@ out_put:
dma_buf_put(dma_buf);
return ret;
}
+EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -408,7 +409,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
return dmabuf;
}
-/*
+/**
* drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
* @dev: dev to export the buffer from
* @file_priv: drm file-private structure
@@ -421,10 +422,10 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
* The actual exporting from GEM object to a dma-buf is done through the
* &drm_gem_object_funcs.export callback.
*/
-static int drm_gem_prime_handle_to_fd(struct drm_device *dev,
- struct drm_file *file_priv, uint32_t handle,
- uint32_t flags,
- int *prime_fd)
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags,
+ int *prime_fd)
{
struct drm_gem_object *obj;
int ret = 0;
@@ -506,6 +507,7 @@ out_unlock:
return ret;
}
+EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -864,9 +866,9 @@ EXPORT_SYMBOL(drm_prime_get_contiguous_size);
* @obj: GEM object to export
* @flags: flags like DRM_CLOEXEC and DRM_RDWR
*
- * This is the implementation of the &drm_gem_object_funcs.export functions
- * for GEM drivers using the PRIME helpers. It is used as the default for
- * drivers that do not set their own.
+ * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers
+ * using the PRIME helpers. It is used as the default in
+ * drm_gem_prime_handle_to_fd().
*/
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
int flags)
@@ -962,9 +964,10 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
*
- * This is the implementation of the gem_prime_import functions for GEM
- * drivers using the PRIME helpers. It is the default for drivers that do
- * not set their own &drm_driver.gem_prime_import.
+ * This is the implementation of the gem_prime_import functions for GEM drivers
+ * using the PRIME helpers. Drivers can use this as their
+ * &drm_driver.gem_prime_import implementation. It is used as the default
+ * implementation in drm_gem_prime_fd_to_handle().
*
* Drivers must arrange to call drm_prime_gem_destroy() from their
* &drm_gem_object_funcs.free hook when using this function.
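Un-staticing these two helpers lets drivers call the PRIME import/export paths directly instead of only through the core ioctls. A sketch of what that enables (hypothetical driver wrapper, not code from this series):

/* Hypothetical wrapper: export a GEM handle and hand back the dma-buf fd. */
static int my_export_fd(struct drm_device *dev, struct drm_file *file_priv,
			u32 handle, u32 flags)
{
	int fd, ret;

	ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, &fd);
	return ret ? ret : fd;
}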
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index dfec479830e4..596272149a35 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -27,6 +27,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/drm_property.h>
#include "drm_crtc_internal.h"
@@ -751,6 +752,64 @@ bool drm_property_replace_blob(struct drm_property_blob **blob,
}
EXPORT_SYMBOL(drm_property_replace_blob);
+/**
+ * drm_property_replace_blob_from_id - replace a blob property taking a reference
+ * @dev: DRM device
+ * @blob: a pointer to the member blob to be replaced
+ * @blob_id: the id of the new blob to replace with
+ * @expected_size: expected size of the blob property
+ * @expected_elem_size: expected size of an element in the blob property
+ * @replaced: if the blob was in fact replaced
+ *
+ * Look up the new blob from id, take its reference, check expected sizes of
+ * the blob and its elements, and replace the old blob with the new one.
+ * @replaced reports whether a replacement actually took place.
+ *
+ * Return: 0 on success, -EINVAL if the new blob was not found or the sizes
+ * don't match.
+ */
+int drm_property_replace_blob_from_id(struct drm_device *dev,
+ struct drm_property_blob **blob,
+ uint64_t blob_id,
+ ssize_t expected_size,
+ ssize_t expected_elem_size,
+ bool *replaced)
+{
+ struct drm_property_blob *new_blob = NULL;
+
+ if (blob_id != 0) {
+ new_blob = drm_property_lookup_blob(dev, blob_id);
+ if (new_blob == NULL) {
+ drm_dbg_atomic(dev,
+ "cannot find blob ID %llu\n", blob_id);
+ return -EINVAL;
+ }
+
+ if (expected_size > 0 &&
+ new_blob->length != expected_size) {
+ drm_dbg_atomic(dev,
+ "[BLOB:%d] length %zu different from expected %zu\n",
+ new_blob->base.id, new_blob->length, expected_size);
+ drm_property_blob_put(new_blob);
+ return -EINVAL;
+ }
+ if (expected_elem_size > 0 &&
+ new_blob->length % expected_elem_size != 0) {
+ drm_dbg_atomic(dev,
+ "[BLOB:%d] length %zu not divisible by element size %zu\n",
+ new_blob->base.id, new_blob->length, expected_elem_size);
+ drm_property_blob_put(new_blob);
+ return -EINVAL;
+ }
+ }
+
+ *replaced |= drm_property_replace_blob(blob, new_blob);
+ drm_property_blob_put(new_blob);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_property_replace_blob_from_id);
+
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
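Call sites pass -1 to skip either size check, as the drm_atomic_uapi.c hunks above show; condensed into one handler for reference:

/* Condensed from the gamma_lut call site above; -1 skips the total-size
 * check while sizeof(struct drm_color_lut) enforces the element size.
 */
static int set_gamma_blob(struct drm_device *dev,
			  struct drm_crtc_state *state, uint64_t blob_id)
{
	bool replaced = false;
	int ret;

	ret = drm_property_replace_blob_from_id(dev, &state->gamma_lut,
						blob_id,
						-1, sizeof(struct drm_color_lut),
						&replaced);
	state->color_mgmt_changed |= replaced;
	return ret;
}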
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index cbb65b7ba425..84101baeecc6 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1403,7 +1403,7 @@ static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
struct syncobj_eventfd_entry *entry =
container_of(cb, struct syncobj_eventfd_entry, fence_cb);
- eventfd_signal(entry->ev_fd_ctx, 1);
+ eventfd_signal(entry->ev_fd_ctx);
syncobj_eventfd_entry_free(entry);
}
@@ -1426,13 +1426,13 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
entry->fence = fence;
if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) {
- eventfd_signal(entry->ev_fd_ctx, 1);
+ eventfd_signal(entry->ev_fd_ctx);
syncobj_eventfd_entry_free(entry);
} else {
ret = dma_fence_add_callback(fence, &entry->fence_cb,
syncobj_eventfd_entry_fence_func);
if (ret == -ENOENT) {
- eventfd_signal(entry->ev_fd_ctx, 1);
+ eventfd_signal(entry->ev_fd_ctx);
syncobj_eventfd_entry_free(entry);
}
}
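These hunks track an eventfd API change: eventfd_signal() dropped its counter argument and now always adds one to the eventfd count, so the conversion is mechanical:

#include <linux/eventfd.h>

static void notify(struct eventfd_ctx *ctx)
{
	eventfd_signal(ctx);	/* previously: eventfd_signal(ctx, 1); */
}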
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 4d986077738b..776f2f0b602d 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -862,18 +862,16 @@ err_disable_pm_runtime:
return ret;
}
-static int exynos5433_decon_remove(struct platform_device *pdev)
+static void exynos5433_decon_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &decon_component_ops);
-
- return 0;
}
struct platform_driver exynos5433_decon_driver = {
.probe = exynos5433_decon_probe,
- .remove = exynos5433_decon_remove,
+ .remove_new = exynos5433_decon_remove,
.driver = {
.name = "exynos5433-decon",
.pm = pm_ptr(&exynos5433_decon_pm_ops),
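Every exynos conversion below follows the same template: the remove callback becomes void, drops its trailing "return 0;", and is registered via .remove_new. A minimal sketch of the convention (hypothetical driver):

#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	return 0;	/* probe is unchanged by this conversion */
}

static void my_remove(struct platform_device *pdev)
{
	/* tear down; the core ignores remove errors, so nothing is returned */
}

static struct platform_driver my_driver = {
	.probe = my_probe,
	.remove_new = my_remove,	/* void-returning callback */
	.driver = {
		.name = "my-driver",
	},
};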
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 0156a5e94435..0d185c0564b9 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -765,7 +765,7 @@ err_iounmap:
return ret;
}
-static int decon_remove(struct platform_device *pdev)
+static void decon_remove(struct platform_device *pdev)
{
struct decon_context *ctx = dev_get_drvdata(&pdev->dev);
@@ -774,8 +774,6 @@ static int decon_remove(struct platform_device *pdev)
iounmap(ctx->regs);
component_del(&pdev->dev, &decon_component_ops);
-
- return 0;
}
static int exynos7_decon_suspend(struct device *dev)
@@ -840,7 +838,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(exynos7_decon_pm_ops, exynos7_decon_suspend,
struct platform_driver decon_driver = {
.probe = decon_probe,
- .remove = decon_remove,
+ .remove_new = decon_remove,
.driver = {
.name = "exynos-decon",
.pm = pm_ptr(&exynos7_decon_pm_ops),
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 3404ec1367fb..ca31bad6c576 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -250,14 +250,12 @@ out:
return component_add(&pdev->dev, &exynos_dp_ops);
}
-static int exynos_dp_remove(struct platform_device *pdev)
+static void exynos_dp_remove(struct platform_device *pdev)
{
struct exynos_dp_device *dp = platform_get_drvdata(pdev);
component_del(&pdev->dev, &exynos_dp_ops);
analogix_dp_remove(dp->adp);
-
- return 0;
}
static int exynos_dp_suspend(struct device *dev)
@@ -285,7 +283,7 @@ MODULE_DEVICE_TABLE(of, exynos_dp_match);
struct platform_driver dp_driver = {
.probe = exynos_dp_probe,
- .remove = exynos_dp_remove,
+ .remove_new = exynos_dp_remove,
.driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index a971590b8132..e2c7373f20c6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -107,18 +107,16 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
return 0;
if (!priv->mapping) {
- void *mapping;
+ void *mapping = NULL;
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
mapping = arm_iommu_create_mapping(&platform_bus_type,
EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
else if (IS_ENABLED(CONFIG_IOMMU_DMA))
mapping = iommu_get_domain_for_dev(priv->dma_dev);
- else
- mapping = ERR_PTR(-ENODEV);
- if (IS_ERR(mapping))
- return PTR_ERR(mapping);
+ if (!mapping)
+ return -ENODEV;
priv->mapping = mapping;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 378e5381978f..0dc36df6ada3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -101,7 +101,7 @@ static int exynos_dpi_create_connector(struct drm_encoder *encoder)
ret = drm_connector_init(encoder->dev, connector,
&exynos_dpi_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ DRM_MODE_CONNECTOR_DPI);
if (ret) {
DRM_DEV_ERROR(ctx->dev,
"failed to initialize connector with drm\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 8399256cb5c9..7c59e1164a48 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -300,6 +300,7 @@ err_mode_config_cleanup:
drm_mode_config_cleanup(drm);
exynos_drm_cleanup_dma(drm);
kfree(private);
+ dev_set_drvdata(dev, NULL);
err_free_drm:
drm_dev_put(drm);
@@ -313,6 +314,7 @@ static void exynos_drm_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
@@ -344,15 +346,23 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
match);
}
-static int exynos_drm_platform_remove(struct platform_device *pdev)
+static void exynos_drm_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &exynos_drm_ops);
- return 0;
+}
+
+static void exynos_drm_platform_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ if (drm)
+ drm_atomic_helper_shutdown(drm);
}
static struct platform_driver exynos_drm_platform_driver = {
.probe = exynos_drm_platform_probe,
- .remove = exynos_drm_platform_remove,
+ .remove_new = exynos_drm_platform_remove,
+ .shutdown = exynos_drm_platform_shutdown,
.driver = {
.name = "exynos-drm",
.pm = &exynos_drm_pm_ops,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 8de2714599fc..e81a576de398 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1367,7 +1367,7 @@ err_pm_dis:
return ret;
}
-static int fimc_remove(struct platform_device *pdev)
+static void fimc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimc_context *ctx = get_fimc_context(dev);
@@ -1377,8 +1377,6 @@ static int fimc_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
fimc_put_clocks(ctx);
-
- return 0;
}
static int fimc_runtime_suspend(struct device *dev)
@@ -1410,7 +1408,7 @@ MODULE_DEVICE_TABLE(of, fimc_of_match);
struct platform_driver fimc_driver = {
.probe = fimc_probe,
- .remove = fimc_remove,
+ .remove_new = fimc_remove,
.driver = {
.of_match_table = fimc_of_match,
.name = "exynos-drm-fimc",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 8dde7b1e9b35..a9f1c5c05894 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1277,13 +1277,11 @@ err_disable_pm_runtime:
return ret;
}
-static int fimd_remove(struct platform_device *pdev)
+static void fimd_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &fimd_component_ops);
-
- return 0;
}
static int exynos_fimd_suspend(struct device *dev)
@@ -1325,7 +1323,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(exynos_fimd_pm_ops, exynos_fimd_suspend,
struct platform_driver fimd_driver = {
.probe = fimd_probe,
- .remove = fimd_remove,
+ .remove_new = fimd_remove,
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 414e585ec7dd..f3138423612e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1530,7 +1530,7 @@ err_destroy_slab:
return ret;
}
-static int g2d_remove(struct platform_device *pdev)
+static void g2d_remove(struct platform_device *pdev)
{
struct g2d_data *g2d = platform_get_drvdata(pdev);
@@ -1545,8 +1545,6 @@ static int g2d_remove(struct platform_device *pdev)
g2d_fini_cmdlist(g2d);
destroy_workqueue(g2d->g2d_workq);
kmem_cache_destroy(g2d->runqueue_slab);
-
- return 0;
}
static int g2d_suspend(struct device *dev)
@@ -1609,7 +1607,7 @@ MODULE_DEVICE_TABLE(of, exynos_g2d_match);
struct platform_driver g2d_driver = {
.probe = g2d_probe,
- .remove = g2d_remove,
+ .remove_new = g2d_remove,
.driver = {
.name = "exynos-drm-g2d",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 35771fb4e85d..e9a769590415 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1309,15 +1309,13 @@ err_pm_dis:
return ret;
}
-static int gsc_remove(struct platform_device *pdev)
+static void gsc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
component_del(dev, &gsc_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
-
- return 0;
}
static int __maybe_unused gsc_runtime_suspend(struct device *dev)
@@ -1422,7 +1420,7 @@ MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match);
struct platform_driver gsc_driver = {
.probe = gsc_probe,
- .remove = gsc_remove,
+ .remove_new = gsc_remove,
.driver = {
.name = "exynos-drm-gsc",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 17bab5b1663f..e2920960180f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -442,7 +442,7 @@ err:
return ret;
}
-static int exynos_mic_remove(struct platform_device *pdev)
+static void exynos_mic_remove(struct platform_device *pdev)
{
struct exynos_mic *mic = platform_get_drvdata(pdev);
@@ -450,8 +450,6 @@ static int exynos_mic_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
drm_bridge_remove(&mic->bridge);
-
- return 0;
}
static const struct of_device_id exynos_mic_of_match[] = {
@@ -462,7 +460,7 @@ MODULE_DEVICE_TABLE(of, exynos_mic_of_match);
struct platform_driver mic_driver = {
.probe = exynos_mic_probe,
- .remove = exynos_mic_remove,
+ .remove_new = exynos_mic_remove,
.driver = {
.name = "exynos-mic",
.pm = pm_ptr(&exynos_mic_pm_ops),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index ffb327c5139e..5f7516655b08 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -329,15 +329,13 @@ err_component:
return ret;
}
-static int rotator_remove(struct platform_device *pdev)
+static void rotator_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
component_del(dev, &rotator_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
-
- return 0;
}
static int rotator_runtime_suspend(struct device *dev)
@@ -453,7 +451,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(rotator_pm_ops, rotator_runtime_suspend,
struct platform_driver rotator_driver = {
.probe = rotator_probe,
- .remove = rotator_remove,
+ .remove_new = rotator_remove,
.driver = {
.name = "exynos-rotator",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index f2b8b09a6b4e..392f721f13ab 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -539,15 +539,13 @@ err_ippdrv_register:
return ret;
}
-static int scaler_remove(struct platform_device *pdev)
+static void scaler_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
component_del(dev, &scaler_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
-
- return 0;
}
static int clk_disable_unprepare_wrapper(struct clk *clk)
@@ -721,7 +719,7 @@ MODULE_DEVICE_TABLE(of, exynos_scaler_match);
struct platform_driver scaler_driver = {
.probe = scaler_probe,
- .remove = scaler_remove,
+ .remove_new = scaler_remove,
.driver = {
.name = "exynos-scaler",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index f5e1adfcaa51..00382f28748a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -462,7 +462,7 @@ static int vidi_probe(struct platform_device *pdev)
return component_add(dev, &vidi_component_ops);
}
-static int vidi_remove(struct platform_device *pdev)
+static void vidi_remove(struct platform_device *pdev)
{
struct vidi_context *ctx = platform_get_drvdata(pdev);
@@ -472,13 +472,11 @@ static int vidi_remove(struct platform_device *pdev)
}
component_del(&pdev->dev, &vidi_component_ops);
-
- return 0;
}
struct platform_driver vidi_driver = {
.probe = vidi_probe,
- .remove = vidi_remove,
+ .remove_new = vidi_remove,
.driver = {
.name = "exynos-drm-vidi",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index f3aaa4ea3e68..43bed6cbaaea 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1861,6 +1861,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
return ret;
crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
crtc->pipe_clk = &hdata->phy_clk;
ret = hdmi_create_connector(encoder);
@@ -2067,7 +2069,7 @@ err_ddc:
return ret;
}
-static int hdmi_remove(struct platform_device *pdev)
+static void hdmi_remove(struct platform_device *pdev)
{
struct hdmi_context *hdata = platform_get_drvdata(pdev);
@@ -2090,8 +2092,6 @@ static int hdmi_remove(struct platform_device *pdev)
put_device(&hdata->ddc_adpt->dev);
mutex_destroy(&hdata->mutex);
-
- return 0;
}
static int __maybe_unused exynos_hdmi_suspend(struct device *dev)
@@ -2123,7 +2123,7 @@ static const struct dev_pm_ops exynos_hdmi_pm_ops = {
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
- .remove = hdmi_remove,
+ .remove_new = hdmi_remove,
.driver = {
.name = "exynos-hdmi",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index b302392ff0d7..6822333fd0e6 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1258,13 +1258,11 @@ static int mixer_probe(struct platform_device *pdev)
return ret;
}
-static int mixer_remove(struct platform_device *pdev)
+static void mixer_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &mixer_component_ops);
-
- return 0;
}
static int __maybe_unused exynos_mixer_suspend(struct device *dev)
@@ -1338,5 +1336,5 @@ struct platform_driver mixer_driver = {
.of_match_table = mixer_match_types,
},
.probe = mixer_probe,
- .remove = mixer_remove,
+ .remove_new = mixer_remove,
};
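
Note on the exynos conversions above: every driver follows the same mechanical pattern of switching from the int-returning .remove callback to the void-returning .remove_new, dropping the meaningless "return 0". A minimal, self-contained sketch of the target shape (the demo names are invented, not part of the patch):

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	/* tear down resources; there is no return value to fabricate */
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.remove_new = demo_remove,
	.driver = {
		.name = "demo-remove-new",
	},
};
module_platform_driver(demo_driver);
MODULE_DESCRIPTION("void remove callback demo");
MODULE_LICENSE("GPL");
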
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 8992a95076f2..dd1eb7e9877d 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -855,7 +855,6 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
intel_dp->adapter.owner = THIS_MODULE;
- intel_dp->adapter.class = I2C_CLASS_DDC;
strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
intel_dp->adapter.algo_data = &intel_dp->algo;
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index 09cedabf4776..aa45509859f2 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -411,7 +411,6 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
struct intel_gmbus *bus = &dev_priv->gmbus[i];
bus->adapter.owner = THIS_MODULE;
- bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
sizeof(bus->adapter.name),
"gma500 gmbus %s",
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index fc9a34ed58bd..6daa6669ed23 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -168,7 +168,6 @@ static struct i2c_adapter oaktrail_hdmi_i2c_adapter = {
.name = "oaktrail_hdmi_i2c",
.nr = 3,
.owner = THIS_MODULE,
- .class = I2C_CLASS_DDC,
.algo = &oaktrail_hdmi_i2c_algorithm,
};
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index d6fd5d726216..e4f914deceba 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -2426,7 +2426,6 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
struct drm_device *dev)
{
sdvo->ddc.owner = THIS_MODULE;
- sdvo->ddc.class = I2C_CLASS_DDC;
snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
sdvo->ddc.dev.parent = dev->dev;
sdvo->ddc.algo_data = sdvo;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
index 410bd019bb35..e6e48651c15c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -81,7 +81,6 @@ int hibmc_ddc_create(struct drm_device *drm_dev,
struct hibmc_connector *connector)
{
connector->adapter.owner = THIS_MODULE;
- connector->adapter.class = I2C_CLASS_DDC;
snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
connector->adapter.dev.parent = drm_dev->dev;
i2c_set_adapdata(&connector->adapter, connector);
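
The gma500 and hibmc hunks above all drop ".class = I2C_CLASS_DDC". DDC buses are addressed directly by the DRM EDID helpers, so they do not rely on class-based client instantiation; this appears to be part of the kernel-wide removal of that class. A hedged sketch of a bit-banged DDC adapter registered without a class (all demo_* names and the stub line accessors are invented):

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/module.h>

/* stub open-drain accessors; a real driver would toggle GPIOs here */
static void demo_setsda(void *data, int state) { }
static void demo_setscl(void *data, int state) { }
static int demo_getsda(void *data) { return 1; }
static int demo_getscl(void *data) { return 1; }

static struct i2c_algo_bit_data demo_bit = {
	.setsda = demo_setsda,
	.setscl = demo_setscl,
	.getsda = demo_getsda,
	.getscl = demo_getscl,
	.udelay = 10,
	.timeout = HZ / 10,
};

static struct i2c_adapter demo_ddc = {
	.owner = THIS_MODULE,
	.name = "demo ddc",
	/* no .class: EDID reads go through the adapter directly */
	.algo_data = &demo_bit,
};

int demo_ddc_register(struct device *parent)
{
	demo_ddc.dev.parent = parent;
	return i2c_bit_add_bus(&demo_ddc);
}
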
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index d511d17c5bdf..cff85086f2d6 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -7,7 +7,6 @@
#include <linux/hyperv.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/screen_info.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
@@ -73,11 +72,6 @@ static int hyperv_setup_vram(struct hyperv_drm_device *hv,
struct drm_device *dev = &hv->dev;
int ret;
- if (IS_ENABLED(CONFIG_SYSFB))
- drm_aperture_remove_conflicting_framebuffers(screen_info.lfb_base,
- screen_info.lfb_size,
- &hyperv_driver);
-
hv->fb_size = (unsigned long)hv->mmio_megabytes * 1024 * 1024;
ret = vmbus_allocate_mmio(&hv->mem, hdev, 0, -1, hv->fb_size, 0x100000,
@@ -130,6 +124,8 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
goto err_hv_set_drv_data;
}
+ drm_aperture_remove_framebuffers(&hyperv_driver);
+
ret = hyperv_setup_vram(hv, hdev);
if (ret)
goto err_vmbus_close;
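
The hyperv hunk replaces the screen_info-based aperture removal with the driver-scoped helper, called from probe before the VRAM is claimed. A sketch of the resulting probe-time ordering (hv_demo_* names are invented; not the driver's actual code):

#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <linux/hyperv.h>

static struct drm_driver hv_demo_driver = {
	.name = "hv_demo",
};

static int hv_demo_probe(struct hv_device *hdev)
{
	int ret;

	/*
	 * Kick out any generic/firmware framebuffer first; the
	 * driver-scoped helper needs no screen_info coordinates.
	 */
	ret = drm_aperture_remove_framebuffers(&hv_demo_driver);
	if (ret)
		return ret;

	/* MMIO/VRAM setup would follow here */
	return 0;
}
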
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 2d21930d5501..5b7162076850 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -24,7 +24,9 @@ config DRM_I915_DEBUG
select DEBUG_FS
select PREEMPT_COUNT
select I2C_CHARDEV
+ select REF_TRACKER
select STACKDEPOT
+ select STACKTRACE
select DRM_DP_AUX_CHARDEV
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
@@ -38,6 +40,7 @@ config DRM_I915_DEBUG
select DRM_I915_DEBUG_GEM_ONCE
select DRM_I915_DEBUG_MMIO
select DRM_I915_DEBUG_RUNTIME_PM
+ select DRM_I915_DEBUG_WAKEREF
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
default n
@@ -231,7 +234,9 @@ config DRM_I915_DEBUG_RUNTIME_PM
bool "Enable extra state checking for runtime PM"
depends on DRM_I915
default n
+ select REF_TRACKER
select STACKDEPOT
+ select STACKTRACE
help
Choose this option to turn on extra state checking for the
runtime PM functionality. This may introduce overhead during
@@ -240,3 +245,16 @@ config DRM_I915_DEBUG_RUNTIME_PM
Recommended for driver developers only.
If in doubt, say "N"
+
+config DRM_I915_DEBUG_WAKEREF
+ bool "Enable extra tracking for wakerefs"
+ depends on DRM_I915
+ select REF_TRACKER
+ select STACKDEPOT
+ select STACKTRACE
+ help
+ Choose this option to turn on extra state checking and usage
+ tracking for the wakeref functionality. This may introduce
+ overhead during driver runtime.
+
+ If in doubt, say "N"
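
For context on what selecting REF_TRACKER buys these debug options: each wakeref acquire allocates a tracker (recording a stack trace via STACKDEPOT/STACKTRACE), each release frees it, and leaked entries can later be dumped with their origin. A hedged sketch of the underlying lib/ref_tracker usage, with signatures as I understand them for this series and invented demo_* names:

#include <linux/gfp.h>
#include <linux/ref_tracker.h>

static struct ref_tracker_dir demo_dir;

static void demo_init(void)
{
	/* quarantine up to 16 freed trackers to catch double-frees */
	ref_tracker_dir_init(&demo_dir, 16, "demo");
}

static int demo_get(struct ref_tracker **trackerp)
{
	/* records the acquiring call stack */
	return ref_tracker_alloc(&demo_dir, trackerp, GFP_KERNEL);
}

static int demo_put(struct ref_tracker **trackerp)
{
	/* complains if the tracker was never allocated or already freed */
	return ref_tracker_free(&demo_dir, trackerp);
}
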
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 03e8fb6caa83..11ca9572e8b3 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -608,7 +608,7 @@ static bool intel_crtc_active(struct intel_crtc *crtc)
* crtc->state->active once we have proper CRTC states wired up
* for atomic.
*/
- return crtc && crtc->active && crtc->base.primary->state->fb &&
+ return crtc->active && crtc->base.primary->state->fb &&
crtc->config->hw.adjusted_mode.crtc_clock;
}
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 612d4cd9dacb..3f3cd944a1c5 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -275,7 +275,7 @@ static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state,
struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+ pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
}
static void
@@ -428,7 +428,7 @@ static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn
intel_backlight_set_pwm_level(old_conn_state, level);
panel->backlight.pwm_state.enabled = false;
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+ pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
}
void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
@@ -750,7 +750,7 @@ static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
panel->backlight.pwm_state.enabled = true;
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+ pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
}
static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 3e7e96acb24a..aa169b0055e9 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -3475,8 +3475,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
if (!devdata->dsc)
return false;
- if (crtc_state)
- fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
+ fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
return true;
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 8bb6bab7c8cd..c985ebb6831a 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1180,7 +1180,7 @@ sanitize:
/* force cdclk programming */
dev_priv->display.cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
- dev_priv->display.cdclk.hw.vco = -1;
+ dev_priv->display.cdclk.hw.vco = ~0;
}
static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
@@ -1446,50 +1446,77 @@ static u8 bxt_calc_voltage_level(int cdclk)
return DIV_ROUND_UP(cdclk, 25000);
}
+static u8 calc_voltage_level(int cdclk, int num_voltage_levels,
+ const int voltage_level_max_cdclk[])
+{
+ int voltage_level;
+
+ for (voltage_level = 0; voltage_level < num_voltage_levels; voltage_level++) {
+ if (cdclk <= voltage_level_max_cdclk[voltage_level])
+ return voltage_level;
+ }
+
+ MISSING_CASE(cdclk);
+ return num_voltage_levels - 1;
+}
+
static u8 icl_calc_voltage_level(int cdclk)
{
- if (cdclk > 556800)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
+ static const int icl_voltage_level_max_cdclk[] = {
+ [0] = 312000,
+ [1] = 556800,
+ [2] = 652800,
+ };
+
+ return calc_voltage_level(cdclk,
+ ARRAY_SIZE(icl_voltage_level_max_cdclk),
+ icl_voltage_level_max_cdclk);
}
static u8 ehl_calc_voltage_level(int cdclk)
{
- if (cdclk > 326400)
- return 3;
- else if (cdclk > 312000)
- return 2;
- else if (cdclk > 180000)
- return 1;
- else
- return 0;
+ static const int ehl_voltage_level_max_cdclk[] = {
+ [0] = 180000,
+ [1] = 312000,
+ [2] = 326400,
+ /*
+ * Bspec lists the limit as 556.8 MHz, but some JSL
+ * development boards (at least) boot with 652.8 MHz
+ */
+ [3] = 652800,
+ };
+
+ return calc_voltage_level(cdclk,
+ ARRAY_SIZE(ehl_voltage_level_max_cdclk),
+ ehl_voltage_level_max_cdclk);
}
static u8 tgl_calc_voltage_level(int cdclk)
{
- if (cdclk > 556800)
- return 3;
- else if (cdclk > 326400)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
+ static const int tgl_voltage_level_max_cdclk[] = {
+ [0] = 312000,
+ [1] = 326400,
+ [2] = 556800,
+ [3] = 652800,
+ };
+
+ return calc_voltage_level(cdclk,
+ ARRAY_SIZE(tgl_voltage_level_max_cdclk),
+ tgl_voltage_level_max_cdclk);
}
static u8 rplu_calc_voltage_level(int cdclk)
{
- if (cdclk > 556800)
- return 3;
- else if (cdclk > 480000)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
+ static const int rplu_voltage_level_max_cdclk[] = {
+ [0] = 312000,
+ [1] = 480000,
+ [2] = 556800,
+ [3] = 652800,
+ };
+
+ return calc_voltage_level(cdclk,
+ ARRAY_SIZE(rplu_voltage_level_max_cdclk),
+ rplu_voltage_level_max_cdclk);
}
static void icl_readout_refclk(struct drm_i915_private *dev_priv,
@@ -1800,6 +1827,8 @@ static bool cdclk_pll_is_unknown(unsigned int vco)
return vco == ~0;
}
+static const int cdclk_squash_len = 16;
+
static int cdclk_squash_divider(u16 waveform)
{
return hweight16(waveform ?: 0xffff);
@@ -1811,7 +1840,6 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
struct intel_cdclk_config *mid_cdclk_config)
{
u16 old_waveform, new_waveform, mid_waveform;
- int size = 16;
int div = 2;
/* Return if PLL is in an unknown state, force a complete disable and re-enable. */
@@ -1850,7 +1878,8 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
}
mid_cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_squash_divider(mid_waveform) *
- mid_cdclk_config->vco, size * div);
+ mid_cdclk_config->vco,
+ cdclk_squash_len * div);
/* make sure the mid clock came out sane */
@@ -1878,9 +1907,9 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
- u32 val;
+ int unsquashed_cdclk;
u16 waveform;
- int clock;
+ u32 val;
if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 &&
!cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
@@ -1897,15 +1926,13 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
waveform = cdclk_squash_waveform(dev_priv, cdclk);
- if (waveform)
- clock = vco / 2;
- else
- clock = cdclk;
+ unsquashed_cdclk = DIV_ROUND_CLOSEST(cdclk * cdclk_squash_len,
+ cdclk_squash_divider(waveform));
if (HAS_CDCLK_SQUASH(dev_priv))
dg2_cdclk_squash_program(dev_priv, waveform);
- val = bxt_cdclk_cd2x_div_sel(dev_priv, clock, vco) |
+ val = bxt_cdclk_cd2x_div_sel(dev_priv, unsquashed_cdclk, vco) |
bxt_cdclk_cd2x_pipe(dev_priv, pipe);
/*
@@ -2075,7 +2102,7 @@ sanitize:
dev_priv->display.cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
- dev_priv->display.cdclk.hw.vco = -1;
+ dev_priv->display.cdclk.hw.vco = ~0;
}
static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
@@ -3489,7 +3516,7 @@ static const struct intel_cdclk_funcs mtl_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
.modeset_calc_cdclk = bxt_modeset_calc_cdclk,
- .calc_voltage_level = tgl_calc_voltage_level,
+ .calc_voltage_level = rplu_calc_voltage_level,
};
static const struct intel_cdclk_funcs rplu_cdclk_funcs = {
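
The calc_voltage_level() refactor above turns four copies of an if/else ladder into one table walk. A worked illustration of the lookup semantics using the TGL table (standalone sketch, not from the patch): the level returned is the index of the first entry whose limit is at or above the requested cdclk.

static const int demo_tgl_max_cdclk[] = { 312000, 326400, 556800, 652800 };

static int demo_voltage_level(int cdclk)
{
	int level;

	for (level = 0; level < 4; level++)
		if (cdclk <= demo_tgl_max_cdclk[level])
			return level;

	return 3; /* clamp; the kernel version also logs MISSING_CASE */
}

/*
 * demo_voltage_level(312000) == 0
 * demo_voltage_level(540000) == 2  (540000 > 326400, <= 556800)
 * demo_voltage_level(652800) == 3
 */
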
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 1fd068e6e26c..8a84a31c7b48 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -553,8 +553,15 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
intel_psr_lock(new_crtc_state);
- if (new_crtc_state->do_async_flip)
+ if (new_crtc_state->do_async_flip) {
+ spin_lock_irq(&crtc->base.dev->event_lock);
+ /* arm the event for the flip done irq handler */
+ crtc->flip_done_event = new_crtc_state->uapi.event;
+ spin_unlock_irq(&crtc->base.dev->event_lock);
+
+ new_crtc_state->uapi.event = NULL;
return;
+ }
if (intel_crtc_needs_vblank_work(new_crtc_state))
intel_crtc_vblank_work_init(new_crtc_state);
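
The intel_crtc.c hunk above arms crtc->flip_done_event for the flip-done interrupt instead of leaving the event on the crtc state. A sketch of the armed-event pattern (names invented): both sides serialize on dev->event_lock so the event is sent exactly once.

#include <linux/spinlock.h>
#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

static void demo_arm_flip_done(struct drm_crtc *crtc,
			       struct drm_crtc_state *new_state,
			       struct drm_pending_vblank_event **slot)
{
	spin_lock_irq(&crtc->dev->event_lock);
	*slot = new_state->event;	/* parked for the irq handler */
	new_state->event = NULL;
	spin_unlock_irq(&crtc->dev->event_lock);
}

static void demo_flip_done_handler(struct drm_crtc *crtc,
				   struct drm_pending_vblank_event **slot)
{
	spin_lock(&crtc->dev->event_lock);	/* already in irq context */
	if (*slot) {
		drm_crtc_send_vblank_event(crtc, *slot);
		*slot = NULL;
	}
	spin_unlock(&crtc->dev->event_lock);
}
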
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 5fbec5784b83..6b25e195232f 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -745,7 +745,6 @@ static const struct intel_c10pll_state * const mtl_c10_edp_tables[] = {
/* C20 basic DP 1.4 tables */
static const struct intel_c20pll_state mtl_c20_dp_rbr = {
- .link_bit_rate = 162000,
.clock = 162000,
.tx = { 0xbe88, /* tx cfg0 */
0x5800, /* tx cfg1 */
@@ -771,7 +770,6 @@ static const struct intel_c20pll_state mtl_c20_dp_rbr = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr1 = {
- .link_bit_rate = 270000,
.clock = 270000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
@@ -797,7 +795,6 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr1 = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr2 = {
- .link_bit_rate = 540000,
.clock = 540000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
@@ -823,7 +820,6 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr2 = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
- .link_bit_rate = 810000,
.clock = 810000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
@@ -850,8 +846,7 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
/* C20 basic DP 2.0 tables */
static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
- .link_bit_rate = 1000000, /* 10 Gbps */
- .clock = 312500,
+ .clock = 1000000, /* 10 Gbps */
.tx = { 0xbe21, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -875,8 +870,7 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
};
static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = {
- .link_bit_rate = 1350000, /* 13.5 Gbps */
- .clock = 421875,
+ .clock = 1350000, /* 13.5 Gbps */
.tx = { 0xbea0, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -901,8 +895,7 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = {
};
static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = {
- .link_bit_rate = 2000000, /* 20 Gbps */
- .clock = 625000,
+ .clock = 2000000, /* 20 Gbps */
.tx = { 0xbe20, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1521,7 +1514,6 @@ static const struct intel_c10pll_state * const mtl_c10_hdmi_tables[] = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = {
- .link_bit_rate = 25175,
.clock = 25175,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1547,7 +1539,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = {
- .link_bit_rate = 27000,
.clock = 27000,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1573,7 +1564,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = {
- .link_bit_rate = 74250,
.clock = 74250,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1599,7 +1589,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = {
- .link_bit_rate = 148500,
.clock = 148500,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1625,7 +1614,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
- .link_bit_rate = 594000,
.clock = 594000,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1651,8 +1639,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
- .link_bit_rate = 3000000,
- .clock = 166670,
+ .clock = 3000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1677,8 +1664,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
- .link_bit_rate = 6000000,
- .clock = 333330,
+ .clock = 6000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1703,8 +1689,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
- .link_bit_rate = 8000000,
- .clock = 444440,
+ .clock = 8000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1729,8 +1714,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
- .link_bit_rate = 10000000,
- .clock = 555560,
+ .clock = 10000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1755,8 +1739,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
- .link_bit_rate = 12000000,
- .clock = 666670,
+ .clock = 12000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -2005,7 +1988,6 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_
else
mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_0;
- pll_state->link_bit_rate = pixel_clock;
pll_state->clock = pixel_clock;
pll_state->tx[0] = 0xbe88;
pll_state->tx[1] = 0x9800;
@@ -2042,7 +2024,7 @@ static int intel_c20_phy_check_hdmi_link_rate(int clock)
int i;
for (i = 0; tables[i]; i++) {
- if (clock == tables[i]->link_bit_rate)
+ if (clock == tables[i]->clock)
return MODE_OK;
}
@@ -2094,7 +2076,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
return -EINVAL;
for (i = 0; tables[i]; i++) {
- if (crtc_state->port_clock == tables[i]->link_bit_rate) {
+ if (crtc_state->port_clock == tables[i]->clock) {
crtc_state->cx0pll_state.c20 = *tables[i];
return 0;
}
@@ -2117,7 +2099,7 @@ int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state,
static bool intel_c20_use_mplla(u32 clock)
{
/* 10G and 20G rates use MPLLA */
- if (clock == 312500 || clock == 625000)
+ if (clock == 1000000 || clock == 2000000)
return true;
return false;
@@ -2220,11 +2202,11 @@ static u8 intel_c20_get_dp_rate(u32 clock)
return 6;
case 432000: /* 4.32 Gbps eDP */
return 7;
- case 312500: /* 10 Gbps DP2.0 */
+ case 1000000: /* 10 Gbps DP2.0 */
return 8;
- case 421875: /* 13.5 Gbps DP2.0 */
+ case 1350000: /* 13.5 Gbps DP2.0 */
return 9;
- case 625000: /* 20 Gbps DP2.0*/
+ case 2000000: /* 20 Gbps DP2.0 */
return 10;
case 648000: /* 6.48 Gbps eDP*/
return 11;
@@ -2242,13 +2224,13 @@ static u8 intel_c20_get_hdmi_rate(u32 clock)
return 0;
switch (clock) {
- case 166670: /* 3 Gbps */
- case 333330: /* 6 Gbps */
- case 666670: /* 12 Gbps */
+ case 300000: /* 3 Gbps */
+ case 600000: /* 6 Gbps */
+ case 1200000: /* 12 Gbps */
return 1;
- case 444440: /* 8 Gbps */
+ case 800000: /* 8 Gbps */
return 2;
- case 555560: /* 10 Gbps */
+ case 1000000: /* 10 Gbps */
return 3;
default:
MISSING_CASE(clock);
@@ -2259,7 +2241,7 @@ static u8 intel_c20_get_hdmi_rate(u32 clock)
static bool is_dp2(u32 clock)
{
/* DP2.0 clock rates */
- if (clock == 312500 || clock == 421875 || clock == 625000)
+ if (clock == 1000000 || clock == 1350000 || clock == 2000000)
return true;
return false;
@@ -2268,11 +2250,11 @@ static bool is_dp2(u32 clock)
static bool is_hdmi_frl(u32 clock)
{
switch (clock) {
- case 166670: /* 3 Gbps */
- case 333330: /* 6 Gbps */
- case 444440: /* 8 Gbps */
- case 555560: /* 10 Gbps */
- case 666670: /* 12 Gbps */
+ case 300000: /* 3 Gbps */
+ case 600000: /* 6 Gbps */
+ case 800000: /* 8 Gbps */
+ case 1000000: /* 10 Gbps */
+ case 1200000: /* 12 Gbps */
return true;
default:
return false;
@@ -2305,6 +2287,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
const struct intel_c20pll_state *pll_state = &crtc_state->cx0pll_state.c20;
bool dp = false;
int lane = crtc_state->lane_count > 2 ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0;
+ u32 clock = crtc_state->port_clock;
bool cntx;
int i;
@@ -2343,7 +2326,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
}
/* 3.3 mpllb or mplla configuration */
- if (intel_c20_use_mplla(pll_state->clock)) {
+ if (intel_c20_use_mplla(clock)) {
for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
@@ -2370,23 +2353,23 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
/* 4. Program custom width to match the link protocol */
intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_WIDTH,
PHY_C20_CUSTOM_WIDTH_MASK,
- PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(pll_state->clock, dp)),
+ PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(clock, dp)),
MB_WRITE_COMMITTED);
/* 5. For DP or 6. For HDMI */
if (dp) {
intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(6) | PHY_C20_CUSTOM_SERDES_MASK,
- BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(pll_state->clock)),
+ BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(clock)),
MB_WRITE_COMMITTED);
} else {
intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(7) | PHY_C20_CUSTOM_SERDES_MASK,
- is_hdmi_frl(pll_state->clock) ? BIT(7) : 0,
+ is_hdmi_frl(clock) ? BIT(7) : 0,
MB_WRITE_COMMITTED);
intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
- intel_c20_get_hdmi_rate(pll_state->clock),
+ intel_c20_get_hdmi_rate(clock),
MB_WRITE_COMMITTED);
}
@@ -2485,7 +2468,8 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
val |= XELPDP_FORWARD_CLOCK_UNGATE;
- if (is_hdmi_frl(crtc_state->port_clock))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
+ is_hdmi_frl(crtc_state->port_clock))
val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
@@ -3083,24 +3067,29 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
- bool use_mplla;
+ bool sw_use_mpllb = mpll_sw_state->tx[0] & C20_PHY_USE_MPLLB;
+ bool hw_use_mpllb = mpll_hw_state->tx[0] & C20_PHY_USE_MPLLB;
int i;
- use_mplla = intel_c20_use_mplla(mpll_hw_state->clock);
- if (use_mplla) {
- for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
- I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
- "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
- crtc->base.base.id, crtc->base.name, i,
- mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
- }
- } else {
+ I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb,
+ "[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)",
+ crtc->base.base.id, crtc->base.name,
+ sw_use_mpllb, hw_use_mpllb);
+
+ if (hw_use_mpllb) {
for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) {
I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i],
"[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)",
crtc->base.base.id, crtc->base.name, i,
mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]);
}
+ } else {
+ for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
+ I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
+ "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
+ }
}
for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) {
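
On the C20 table rework above: the dropped .clock values were PHY symbol clocks, while the retained ones are link bit rates in port_clock units (10 kbit/s). Reading the hunks, the two differ by the bits carried per symbol: 32 for DP 2.0 128b/132b, 18 for HDMI FRL (matching the DIV18CLK selection). A sketch of the conversion, as my interpretation rather than anything in the patch:

#include <linux/math.h>

static int dp2_symbol_clock_khz(int port_clock)
{
	/* UHBR10: 1000000 -> 312500 kHz (the old .clock value) */
	return DIV_ROUND_CLOSEST(port_clock * 10, 32);
}

static int hdmi_frl_symbol_clock_khz(int port_clock)
{
	/* FRL 3 Gbps: 300000 -> ~166667 kHz (old tables used 166670) */
	return DIV_ROUND_CLOSEST(port_clock * 10, 18);
}
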
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 38f28c480b38..12a29363e5df 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -3672,16 +3672,42 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
AUDIO_OUTPUT_ENABLE(cpu_transcoder);
}
-void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state)
+static int tgl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
{
- if (DISPLAY_VER(dev_priv) >= 12 && crtc_state->port_clock > 594000)
- crtc_state->min_voltage_level = 2;
- else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
- crtc_state->port_clock > 594000)
- crtc_state->min_voltage_level = 3;
- else if (DISPLAY_VER(dev_priv) >= 11 && crtc_state->port_clock > 594000)
- crtc_state->min_voltage_level = 1;
+ if (crtc_state->port_clock > 594000)
+ return 2;
+ else
+ return 0;
+}
+
+static int jsl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->port_clock > 594000)
+ return 3;
+ else
+ return 0;
+}
+
+static int icl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->port_clock > 594000)
+ return 1;
+ else
+ return 0;
+}
+
+void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (DISPLAY_VER(dev_priv) >= 14)
+ crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ crtc_state->min_voltage_level = tgl_ddi_min_voltage_level(crtc_state);
+ else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ crtc_state->min_voltage_level = jsl_ddi_min_voltage_level(crtc_state);
+ else if (DISPLAY_VER(dev_priv) >= 11)
+ crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
}
static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
@@ -3895,7 +3921,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
- intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+ intel_ddi_compute_min_voltage_level(pipe_config);
intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
@@ -4175,7 +4201,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
- intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+ intel_ddi_compute_min_voltage_level(pipe_config);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 63853a1f6582..434de7196875 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -70,8 +70,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
-void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state);
+void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state);
int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
enum transcoder cpu_transcoder,
bool enable, u32 hdcp_mask);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 7d48bcdd5797..b10aad15a63d 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -3746,8 +3746,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (!active)
goto out;
- intel_dsc_get_config(pipe_config);
intel_bigjoiner_get_config(pipe_config);
+ intel_dsc_get_config(pipe_config);
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
DISPLAY_VER(dev_priv) >= 11)
@@ -5926,6 +5926,17 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
return -EINVAL;
}
+ /*
+ * FIXME: Bigjoiner+async flip is busted currently.
+ * Remove this check once the issues are fixed.
+ */
+ if (new_crtc_state->bigjoiner_pipes) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] async flip disallowed with bigjoiner\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
if (plane->pipe != crtc->pipe)
@@ -6963,24 +6974,6 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
drm_WARN_ON(&dev_priv->drm, update_pipes);
}
-static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
-{
- struct intel_atomic_state *state, *next;
- struct llist_node *freed;
-
- freed = llist_del_all(&dev_priv->display.atomic_helper.free_list);
- llist_for_each_entry_safe(state, next, freed, freed)
- drm_atomic_state_put(&state->base);
-}
-
-void intel_atomic_helper_free_state_worker(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);
-
- intel_atomic_helper_free_state(dev_priv);
-}
-
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
@@ -7016,8 +7009,6 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
drm_atomic_helper_commit_cleanup_done(&state->base);
drm_atomic_state_put(&state->base);
-
- intel_atomic_helper_free_state(i915);
}
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
@@ -7362,7 +7353,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
intel_color_cleanup_commit(new_crtc_state);
- drm_atomic_helper_cleanup_planes(dev, &state->base);
+ drm_atomic_helper_unprepare_planes(dev, &state->base);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 7e82b87e9cde..47297ed85822 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -298,12 +298,6 @@ struct intel_display {
const struct intel_audio_funcs *audio;
} funcs;
- /* Grouping using anonymous structs. Keep sorted. */
- struct intel_atomic_helper {
- struct llist_head free_list;
- struct work_struct free_work;
- } atomic_helper;
-
struct {
/* backlight registers and fields in struct intel_panel */
struct mutex lock;
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 79e9f1c3e241..fe4268813786 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -36,7 +36,7 @@ struct drm_printer;
#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)
#define HAS_CDCLK_CRAWL(i915) (DISPLAY_INFO(i915)->has_cdclk_crawl)
#define HAS_CDCLK_SQUASH(i915) (DISPLAY_INFO(i915)->has_cdclk_squash)
-#define HAS_CUR_FBC(i915) (!HAS_GMCH(i915) && DISPLAY_VER(i915) >= 7)
+#define HAS_CUR_FBC(i915) (!HAS_GMCH(i915) && IS_DISPLAY_VER(i915, 7, 13))
#define HAS_D12_PLANE_MINIMIZATION(i915) (IS_ROCKETLAKE(i915) || IS_ALDERLAKE_S(i915))
#define HAS_DDI(i915) (DISPLAY_INFO(i915)->has_ddi)
#define HAS_DISPLAY(i915) (DISPLAY_RUNTIME_INFO(i915)->pipe_mask != 0)
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 62f7b10484be..9df9097a0255 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -259,10 +259,6 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- init_llist_head(&i915->display.atomic_helper.free_list);
- INIT_WORK(&i915->display.atomic_helper.free_work,
- intel_atomic_helper_free_state_worker);
-
intel_init_quirks(i915);
intel_fbc_init(i915);
@@ -430,9 +426,6 @@ void intel_display_driver_remove(struct drm_i915_private *i915)
flush_workqueue(i915->display.wq.flip);
flush_workqueue(i915->display.wq.modeset);
- flush_work(&i915->display.atomic_helper.free_work);
- drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));
-
/*
* MST topology needs to be suspended so we don't have any calls to
* fbdev after it's finalized. MST will be destroyed later as part of
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index f8ed53f30b2e..a7d8f3fc98de 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -340,18 +340,15 @@ static void flip_done_handler(struct drm_i915_private *i915,
enum pipe pipe)
{
struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
- struct drm_crtc_state *crtc_state = crtc->base.state;
- struct drm_pending_vblank_event *e = crtc_state->event;
- struct drm_device *dev = &i915->drm;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev->event_lock, irqflags);
- crtc_state->event = NULL;
+ spin_lock(&i915->drm.event_lock);
- drm_crtc_send_vblank_event(&crtc->base, e);
+ if (crtc->flip_done_event) {
+ drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event);
+ crtc->flip_done_event = NULL;
+ }
- spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ spin_unlock(&i915->drm.event_lock);
}
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index b3e942f2eeb0..3fdd8a517983 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -684,8 +684,6 @@ struct intel_atomic_state {
bool skip_intermediate_wm;
bool rps_interactive;
-
- struct llist_node freed;
};
struct intel_plane_state {
@@ -1022,7 +1020,6 @@ struct intel_c10pll_state {
};
struct intel_c20pll_state {
- u32 link_bit_rate;
u32 clock; /* in kHz */
u16 tx[3];
u16 cmn[4];
@@ -1476,6 +1473,9 @@ struct intel_crtc {
struct intel_crtc_state *config;
+ /* armed event for async flip */
+ struct drm_pending_vblank_event *flip_done_event;
+
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 63e080e07023..b70502586ab9 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -335,77 +335,6 @@ static void disable_event_handler(struct drm_i915_private *i915,
intel_de_write(i915, htp_reg, 0);
}
-static void
-disable_flip_queue_event(struct drm_i915_private *i915,
- i915_reg_t ctl_reg, i915_reg_t htp_reg)
-{
- u32 event_ctl;
- u32 event_htp;
-
- event_ctl = intel_de_read(i915, ctl_reg);
- event_htp = intel_de_read(i915, htp_reg);
- if (event_ctl != (DMC_EVT_CTL_ENABLE |
- DMC_EVT_CTL_RECURRING |
- REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1) |
- REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_CLK_MSEC)) ||
- !event_htp) {
- drm_dbg_kms(&i915->drm,
- "Unexpected DMC event configuration (control %08x htp %08x)\n",
- event_ctl, event_htp);
- return;
- }
-
- disable_event_handler(i915, ctl_reg, htp_reg);
-}
-
-static bool
-get_flip_queue_event_regs(struct drm_i915_private *i915, enum intel_dmc_id dmc_id,
- i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
-{
- if (dmc_id == DMC_FW_MAIN) {
- if (DISPLAY_VER(i915) == 12) {
- *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
- *htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);
-
- return true;
- }
- } else if (dmc_id >= DMC_FW_PIPEA && dmc_id <= DMC_FW_PIPED) {
- if (IS_DG2(i915)) {
- *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
- *htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);
-
- return true;
- }
- }
-
- return false;
-}
-
-static void
-disable_all_flip_queue_events(struct drm_i915_private *i915)
-{
- enum intel_dmc_id dmc_id;
-
- /* TODO: check if the following applies to all D13+ platforms. */
- if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
- return;
-
- for_each_dmc_id(dmc_id) {
- i915_reg_t ctl_reg;
- i915_reg_t htp_reg;
-
- if (!has_dmc_id_fw(i915, dmc_id))
- continue;
-
- if (!get_flip_queue_event_regs(i915, dmc_id, &ctl_reg, &htp_reg))
- continue;
-
- disable_flip_queue_event(i915, ctl_reg, htp_reg);
- }
-}
-
static void disable_all_event_handlers(struct drm_i915_private *i915)
{
enum intel_dmc_id dmc_id;
@@ -493,6 +422,65 @@ void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
}
+static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915,
+ enum intel_dmc_id dmc_id, i915_reg_t reg)
+{
+ u32 offset = i915_mmio_reg_offset(reg);
+ u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, 0));
+ u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
+
+ return offset >= start && offset < end;
+}
+
+static bool is_dmc_evt_htp_reg(struct drm_i915_private *i915,
+ enum intel_dmc_id dmc_id, i915_reg_t reg)
+{
+ u32 offset = i915_mmio_reg_offset(reg);
+ u32 start = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, 0));
+ u32 end = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
+
+ return offset >= start && offset < end;
+}
+
+static bool disable_dmc_evt(struct drm_i915_private *i915,
+ enum intel_dmc_id dmc_id,
+ i915_reg_t reg, u32 data)
+{
+ if (!is_dmc_evt_ctl_reg(i915, dmc_id, reg))
+ return false;
+
+ /* keep all pipe DMC events disabled by default */
+ if (dmc_id != DMC_FW_MAIN)
+ return true;
+
+ /* also disable the flip queue event on the main DMC on TGL */
+ if (IS_TIGERLAKE(i915) &&
+ REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_CLK_MSEC)
+ return true;
+
+ /* also disable the HRR event on the main DMC on TGL/ADLS */
+ if ((IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915)) &&
+ REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_VBLANK_A)
+ return true;
+
+ return false;
+}
+
+static u32 dmc_mmiodata(struct drm_i915_private *i915,
+ struct intel_dmc *dmc,
+ enum intel_dmc_id dmc_id, int i)
+{
+ if (disable_dmc_evt(i915, dmc_id,
+ dmc->dmc_info[dmc_id].mmioaddr[i],
+ dmc->dmc_info[dmc_id].mmiodata[i]))
+ return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_FALSE);
+ else
+ return dmc->dmc_info[dmc_id].mmiodata[i];
+}
+
/**
* intel_dmc_load_program() - write the firmware from memory to register.
* @i915: i915 drm device.
@@ -532,7 +520,7 @@ void intel_dmc_load_program(struct drm_i915_private *i915)
for_each_dmc_id(dmc_id) {
for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i],
- dmc->dmc_info[dmc_id].mmiodata[i]);
+ dmc_mmiodata(i915, dmc, dmc_id, i));
}
}
@@ -540,13 +528,6 @@ void intel_dmc_load_program(struct drm_i915_private *i915)
gen9_set_dc_state_debugmask(i915);
- /*
- * Flip queue events need to be disabled before enabling DC5/6.
- * i915 doesn't use the flip queue feature, so disable it already
- * here.
- */
- disable_all_flip_queue_events(i915);
-
pipedmc_clock_gating_wa(i915, false);
}
@@ -742,9 +723,17 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
return 0;
}
+ drm_dbg_kms(&i915->drm, "DMC %d:\n", dmc_id);
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
+
+ drm_dbg_kms(&i915->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
+ i, mmioaddr[i], mmiodata[i],
+ is_dmc_evt_ctl_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
+ is_dmc_evt_htp_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
+ disable_dmc_evt(i915, dmc_id, dmc_info->mmioaddr[i],
+ dmc_info->mmiodata[i]) ? " (disabling)" : "");
}
dmc_info->mmio_count = mmio_count;
dmc_info->start_mmioaddr = start_mmioaddr;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index cf10094acae3..90d0dbb41cfe 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -60,6 +60,7 @@
#define DMC_EVT_CTL_EVENT_ID_MASK REG_GENMASK(15, 8)
#define DMC_EVT_CTL_EVENT_ID_FALSE 0x01
+#define DMC_EVT_CTL_EVENT_ID_VBLANK_A 0x32 /* main DMC */
/* An event handler scheduled to run at a 1 kHz frequency. */
#define DMC_EVT_CTL_EVENT_ID_CLK_MSEC 0xbf
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 3b2482bf683f..f5ef95da5534 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -2101,7 +2101,7 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
}
}
- dsc_max_bpc = intel_dp_dsc_min_src_input_bpc(i915);
+ dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
if (!dsc_max_bpc)
return -EINVAL;
@@ -4764,7 +4764,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
intel_dp->train_set, crtc_state->lane_count);
drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
- link_status[DP_DPCD_REV]);
+ intel_dp->dpcd[DP_DPCD_REV]);
}
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index dbc1b66c8ee4..1abfafbbfa75 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -650,19 +650,30 @@ intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 link_bw, u8 rate_select)
{
- u8 link_config[2];
+ u8 lane_count = crtc_state->lane_count;
- /* Write the link configuration data */
- link_config[0] = link_bw;
- link_config[1] = crtc_state->lane_count;
if (crtc_state->enhanced_framing)
- link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+ lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ if (link_bw) {
+ /* DP and eDP v1.3 and earlier link bw set method. */
+ u8 link_config[] = { link_bw, lane_count };
- /* eDP 1.4 rate select method. */
- if (!link_bw)
- drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
- &rate_select, 1);
+ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config,
+ ARRAY_SIZE(link_config));
+ } else {
+ /*
+ * eDP v1.4 and later link rate set method.
+ *
+ * eDP v1.4x sinks shall ignore DP_LINK_RATE_SET if
+ * DP_LINK_BW_SET is set. Avoid writing DP_LINK_BW_SET.
+ *
+ * eDP v1.5 sinks allow choosing either, and the last choice
+ * shall be active.
+ */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_LANE_COUNT_SET, lane_count);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_RATE_SET, rate_select);
+ }
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index e8940acea8ad..8a9432335030 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -614,7 +614,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
- intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+ intel_ddi_compute_min_voltage_level(pipe_config);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 69c3cfe3120e..0c0144eaa8fa 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -771,7 +771,7 @@ bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
bool intel_fb_uses_dpt(const struct drm_framebuffer *fb)
{
- return fb && to_i915(fb->dev)->display.params.enable_dpt &&
+ return to_i915(fb->dev)->display.params.enable_dpt &&
intel_fb_modifier_uses_dpt(to_i915(fb->dev), fb->modifier);
}
@@ -1509,8 +1509,20 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
size += remap_info->size;
} else {
- unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane,
- remap_info->width);
+ unsigned int dst_stride;
+
+ /*
+ * The hardware automagically calculates the CCS AUX surface
+ * stride from the main surface stride so can't really remap a
+ * smaller subset (unless we'd remap in whole AUX page units).
+ */
+ if (intel_fb_needs_pot_stride_remap(fb) &&
+ intel_fb_is_ccs_modifier(fb->base.modifier))
+ dst_stride = remap_info->src_stride;
+ else
+ dst_stride = remap_info->width;
+
+ dst_stride = plane_view_dst_stride_tiles(fb, color_plane, dst_stride);
assign_chk_ovf(i915, remap_info->dst_stride, dst_stride);
color_plane_info->mapping_stride = dst_stride *
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 40d7b6f3f489..e9e4dcf345f9 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -899,7 +899,6 @@ int intel_gmbus_setup(struct drm_i915_private *i915)
}
bus->adapter.owner = THIS_MODULE;
- bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
sizeof(bus->adapter.name),
"i915 gmbus %s", gmbus_pin->name);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 6029bb71276c..8f702c3fc62d 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -891,13 +891,13 @@ transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_trans
return false;
}
-static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
+static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
{
- if (!cstate || !cstate->hw.active)
+ if (!crtc_state->hw.active)
return 0;
return DIV_ROUND_UP(1000 * 1000,
- drm_mode_vrefresh(&cstate->hw.adjusted_mode));
+ drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
}
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
@@ -3319,11 +3319,11 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct dentry *root = connector->base.debugfs_entry;
- if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) {
- if (!(HAS_DP20(i915) &&
- connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort))
- return;
- }
+ /* TODO: Add support for MST connectors as well. */
+ if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
+ connector->mst_port)
+ return;
debugfs_create_file("i915_psr_sink_status", 0444, root,
connector, &i915_psr_sink_status_fops);
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 9218047495fb..acc6b6804105 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -3327,7 +3327,6 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo_ddc *ddc,
ddc->ddc_bus = ddc_bus;
ddc->ddc.owner = THIS_MODULE;
- ddc->ddc.class = I2C_CLASS_DDC;
snprintf(ddc->ddc.name, I2C_NAME_SIZE, "SDVO %c DDC%d",
port_name(sdvo->base.port), ddc_bus);
ddc->ddc.dev.parent = &pdev->dev;
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index f64d348a969e..dcf05e00e505 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -1030,18 +1030,25 @@ static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enabl
__xelpdp_tc_phy_enable_tcss_power(tc, enable);
- if ((!tc_phy_wait_for_ready(tc) ||
- !xelpdp_tc_phy_wait_for_tcss_power(tc, enable)) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- if (enable) {
- __xelpdp_tc_phy_enable_tcss_power(tc, false);
- xelpdp_tc_phy_wait_for_tcss_power(tc, false);
- }
+ if (enable && !tc_phy_wait_for_ready(tc))
+ goto out_disable;
- return false;
- }
+ if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable))
+ goto out_disable;
return true;
+
+out_disable:
+ if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY))
+ return false;
+
+ if (!enable)
+ return false;
+
+ __xelpdp_tc_phy_enable_tcss_power(tc, false);
+ xelpdp_tc_phy_wait_for_tcss_power(tc, false);
+
+ return false;
}
static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 2ee4f0d95851..d4386cb3569e 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1417,9 +1417,6 @@ set_tv_mode_timings(struct drm_i915_private *dev_priv,
static void set_color_conversion(struct drm_i915_private *dev_priv,
const struct color_conversion *color_conversion)
{
- if (!color_conversion)
- return;
-
intel_de_write(dev_priv, TV_CSC_Y,
(color_conversion->ry << 16) | color_conversion->gy);
intel_de_write(dev_priv, TV_CSC_Y2,
@@ -1454,9 +1451,6 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
int xpos, ypos;
unsigned int xsize, ysize;
- if (!tv_mode)
- return; /* can't happen (mode_prepare prevents this) */
-
tv_ctl = intel_de_read(dev_priv, TV_CTL);
tv_ctl &= TV_CTL_SAVE;
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 5f2fb702e367..17d6572f9d0a 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -812,13 +812,13 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
}
static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
- bool *check_equal)
+ bool *all_equal)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
i915_reg_t dsc_reg[2];
int i, vdsc_per_pipe, dsc_reg_num;
- u32 val = 0;
+ u32 val;
vdsc_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
dsc_reg_num = min_t(int, ARRAY_SIZE(dsc_reg), vdsc_per_pipe);
@@ -827,20 +827,13 @@ static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
intel_dsc_get_pps_reg(crtc_state, pps, dsc_reg, dsc_reg_num);
- if (check_equal)
- *check_equal = true;
-
- for (i = 0; i < dsc_reg_num; i++) {
- u32 tmp;
+ *all_equal = true;
- tmp = intel_de_read(i915, dsc_reg[i]);
+ val = intel_de_read(i915, dsc_reg[0]);
- if (i == 0) {
- val = tmp;
- } else if (check_equal && tmp != val) {
- *check_equal = false;
- break;
- } else if (!check_equal) {
+ for (i = 1; i < dsc_reg_num; i++) {
+ if (intel_de_read(i915, dsc_reg[i]) != val) {
+ *all_equal = false;
break;
}
}
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 1e7c97243fcf..8a934bada624 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -504,7 +504,6 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
{
struct drm_plane *plane = NULL;
struct intel_plane *intel_plane;
- struct intel_plane_state *plane_state = NULL;
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct drm_atomic_state *drm_state = crtc_state->uapi.state;
@@ -536,6 +535,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
/* walkthrough scaler_users bits and start assigning scalers */
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
+ struct intel_plane_state *plane_state = NULL;
int *scaler_id;
const char *name;
int idx, ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index e38f06a6e56e..dcbfe32fd30c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -279,7 +279,8 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
}
static struct i915_gem_proto_context *
-proto_context_create(struct drm_i915_private *i915, unsigned int flags)
+proto_context_create(struct drm_i915_file_private *fpriv,
+ struct drm_i915_private *i915, unsigned int flags)
{
struct i915_gem_proto_context *pc, *err;
@@ -287,6 +288,7 @@ proto_context_create(struct drm_i915_private *i915, unsigned int flags)
if (!pc)
return ERR_PTR(-ENOMEM);
+ pc->fpriv = fpriv;
pc->num_user_engines = -1;
pc->user_engines = NULL;
pc->user_flags = BIT(UCONTEXT_BANNABLE) |
@@ -1622,6 +1624,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
err = PTR_ERR(ppgtt);
goto err_ctx;
}
+ ppgtt->vm.fpriv = pc->fpriv;
vm = &ppgtt->vm;
}
if (vm)
@@ -1741,7 +1744,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
/* 0 reserved for invalid/unassigned ppgtt */
xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
- pc = proto_context_create(i915, 0);
+ pc = proto_context_create(file_priv, i915, 0);
if (IS_ERR(pc)) {
err = PTR_ERR(pc);
goto err;
@@ -1823,6 +1826,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
args->vm_id = id;
+ ppgtt->vm.fpriv = file_priv;
return 0;
err_put:
@@ -2285,7 +2289,8 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
return -EIO;
}
- ext_data.pc = proto_context_create(i915, args->flags);
+ ext_data.pc = proto_context_create(file->driver_priv, i915,
+ args->flags);
if (IS_ERR(ext_data.pc))
return PTR_ERR(ext_data.pc);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index cb78214a7dcd..03bc7f9d191b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -188,6 +188,9 @@ struct i915_gem_proto_engine {
* CONTEXT_CREATE_SET_PARAM during GEM_CONTEXT_CREATE.
*/
struct i915_gem_proto_context {
+ /** @fpriv: Client which creates the context */
+ struct drm_i915_file_private *fpriv;
+
/** @vm: See &i915_gem_context.vm */
struct i915_address_space *vm;
@@ -409,9 +412,9 @@ struct i915_gem_context {
/** @stale: tracks stale engines to be destroyed */
struct {
- /** @lock: guards engines */
+ /** @stale.lock: guards engines */
spinlock_t lock;
- /** @engines: list of stale engines */
+ /** @stale.engines: list of stale engines */
struct list_head engines;
} stale;
};
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index ccc077b74d2d..555022c0652c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -254,6 +254,8 @@ struct i915_execbuffer {
struct intel_gt *gt; /* gt for the execbuf */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
+ intel_wakeref_t wakeref;
+ intel_wakeref_t wakeref_gt0;
/** our requests to build */
struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
@@ -1157,7 +1159,7 @@ static void reloc_cache_unmap(struct reloc_cache *cache)
vaddr = unmask_page(cache->vaddr);
if (cache->vaddr & KMAP)
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
else
io_mapping_unmap_atomic((void __iomem *)vaddr);
}
@@ -1173,7 +1175,7 @@ static void reloc_cache_remap(struct reloc_cache *cache,
if (cache->vaddr & KMAP) {
struct page *page = i915_gem_object_get_page(obj, cache->page);
- vaddr = kmap_atomic(page);
+ vaddr = kmap_local_page(page);
cache->vaddr = unmask_flags(cache->vaddr) |
(unsigned long)vaddr;
} else {
@@ -1203,7 +1205,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer
if (cache->vaddr & CLFLUSH_AFTER)
mb();
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
i915_gem_object_finish_access(obj);
} else {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
@@ -1235,7 +1237,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
struct page *page;
if (cache->vaddr) {
- kunmap_atomic(unmask_page(cache->vaddr));
+ kunmap_local(unmask_page(cache->vaddr));
} else {
unsigned int flushes;
int err;
@@ -1257,7 +1259,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
if (!obj->mm.dirty)
set_page_dirty(page);
- vaddr = kmap_atomic(page);
+ vaddr = kmap_local_page(page);
cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
cache->page = pageno;
@@ -1679,7 +1681,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
size = nreloc * sizeof(*relocs);
- relocs = kvmalloc_array(size, 1, GFP_KERNEL);
+ relocs = kvmalloc_array(1, size, GFP_KERNEL);
if (!relocs) {
err = -ENOMEM;
goto err;
@@ -2720,13 +2722,13 @@ eb_select_engine(struct i915_execbuffer *eb)
for_each_child(ce, child)
intel_context_get(child);
- intel_gt_pm_get(gt);
+ eb->wakeref = intel_gt_pm_get(ce->engine->gt);
/*
* Keep GT0 active on MTL so that i915_vma_parked() doesn't
* free VMAs while execbuf ioctl is validating VMAs.
*/
if (gt->info.id)
- intel_gt_pm_get(to_gt(gt->i915));
+ eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
err = intel_context_alloc_state(ce);
@@ -2766,9 +2768,9 @@ eb_select_engine(struct i915_execbuffer *eb)
err:
if (gt->info.id)
- intel_gt_pm_put(to_gt(gt->i915));
+ intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(ce->engine->gt, eb->wakeref);
for_each_child(ce, child)
intel_context_put(child);
intel_context_put(ce);
@@ -2786,8 +2788,8 @@ eb_put_engine(struct i915_execbuffer *eb)
* i915_vma_parked() from interfering while execbuf validates vmas.
*/
if (eb->gt->info.id)
- intel_gt_pm_put(to_gt(eb->gt->i915));
- intel_gt_pm_put(eb->gt);
+ intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
+ intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
for_each_child(eb->context, child)
intel_context_put(child);
intel_context_put(eb->context);
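
With tracked wakerefs, the handle returned by the get in eb_select_engine() has to survive until eb_put_engine(), hence the new wakeref/wakeref_gt0 fields in struct i915_execbuffer. A sketch of carrying an acquire handle across functions via the owning struct; all names here are hypothetical stubs, not driver API:

typedef unsigned long wakeref_t;	/* 0 == no reference held */

struct execbuf_state {
	wakeref_t wakeref;		/* taken in select, released in put */
};

static wakeref_t pm_get(void) { return 1; }	/* stub tracked acquire */
static void pm_put(wakeref_t wf) { (void)wf; }	/* stub tracked release */

static void select_engine(struct execbuf_state *eb)
{
	eb->wakeref = pm_get();		/* stash the handle across calls */
}

static void put_engine(struct execbuf_state *eb)
{
	pm_put(eb->wakeref);		/* release with the stored handle */
}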
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 6bc26b4b06b8..ea7561ae6e13 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -36,7 +36,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
struct sg_table *st;
struct scatterlist *sg;
unsigned int npages; /* restricted by sg_alloc_table */
- int max_order = MAX_ORDER;
+ int max_order = MAX_PAGE_ORDER;
unsigned int max_segment;
gfp_t gfp;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index c26d87555825..58e6c680fe0d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -106,6 +106,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->mm.link);
+#ifdef CONFIG_PROC_FS
+ INIT_LIST_HEAD(&obj->client_link);
+#endif
+
INIT_LIST_HEAD(&obj->lut_list);
spin_lock_init(&obj->lut_lock);
@@ -293,6 +297,10 @@ void __i915_gem_free_object_rcu(struct rcu_head *head)
container_of(head, typeof(*obj), rcu);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ /* We need to keep this alive for RCU read access from fdinfo. */
+ if (obj->mm.n_placements > 1)
+ kfree(obj->mm.placements);
+
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@@ -389,9 +397,6 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
if (obj->ops->release)
obj->ops->release(obj);
- if (obj->mm.n_placements > 1)
- kfree(obj->mm.placements);
-
if (obj->shares_resv_from)
i915_vm_resv_put(obj->shares_resv_from);
@@ -442,6 +447,8 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
+ i915_drm_client_remove_object(obj);
+
/*
* Before we free the object, make sure any pure RCU-only
* read-side critical sections are complete, e.g.
@@ -493,17 +500,15 @@ static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
pgoff_t idx = offset >> PAGE_SHIFT;
- void *src_map;
void *src_ptr;
- src_map = kmap_atomic(i915_gem_object_get_page(obj, idx));
-
- src_ptr = src_map + offset_in_page(offset);
+ src_ptr = kmap_local_page(i915_gem_object_get_page(obj, idx))
+ + offset_in_page(offset);
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
drm_clflush_virt_range(src_ptr, size);
memcpy(dst, src_ptr, size);
- kunmap_atomic(src_map);
+ kunmap_local(src_ptr);
}
static void
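
The kmap_atomic() -> kmap_local_page() conversions throughout this series switch to thread-local, stack-ordered mappings; unlike kmap_atomic(), the local variant does not disable preemption or page faults as a side effect. A minimal sketch of the conversion pattern, assuming only <linux/highmem.h>:

#include <linux/highmem.h>
#include <linux/string.h>

static void read_from_page(struct page *page, size_t offset,
			   void *dst, size_t len)
{
	/* Map, offset into the page, copy, then unmap. */
	void *src = kmap_local_page(page) + offset;

	memcpy(dst, src, len);
	kunmap_local(src);	/* accepts an offset address within the page */
}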
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 2292404007c8..0c5cdab278b6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -302,6 +302,18 @@ struct drm_i915_gem_object {
*/
struct i915_address_space *shares_resv_from;
+#ifdef CONFIG_PROC_FS
+ /**
+ * @client: @i915_drm_client which created the object
+ */
+ struct i915_drm_client *client;
+
+ /**
+ * @client_link: Link into @i915_drm_client.objects_list
+ */
+ struct list_head client_link;
+#endif
+
union {
struct rcu_head rcu;
struct llist_node freed;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 5df128e2f4dc..ef85c6dc9fd5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -65,16 +65,13 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
dst = vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
- void *src;
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
goto err_st;
- src = kmap_atomic(page);
- memcpy(dst, src, PAGE_SIZE);
+ memcpy_from_page(dst, page, 0, PAGE_SIZE);
drm_clflush_virt_range(dst, PAGE_SIZE);
- kunmap_atomic(src);
put_page(page);
dst += PAGE_SIZE;
@@ -113,16 +110,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
- char *dst;
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
continue;
- dst = kmap_atomic(page);
drm_clflush_virt_range(src, PAGE_SIZE);
- memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(dst);
+ memcpy_to_page(page, 0, src, PAGE_SIZE);
set_page_dirty(page);
if (obj->mm.madv == I915_MADV_WILLNEED)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 73a4a4eb29e0..38b72d86560f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -485,11 +485,13 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
if (err < 0)
return err;
- vaddr = kmap_atomic(page);
+ vaddr = kmap_local_page(page);
+ pagefault_disable();
unwritten = __copy_from_user_inatomic(vaddr + pg,
user_data,
len);
- kunmap_atomic(vaddr);
+ pagefault_enable();
+ kunmap_local(vaddr);
err = aops->write_end(obj->base.filp, mapping, offset, len,
len - unwritten, page, data);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 1a766d8e7cce..8c88075eeab2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -386,6 +386,27 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
+ /* Wa_14019821291 */
+ if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
+ /*
+ * This workaround is primarily implemented by the BIOS. We
+ * just need to figure out whether the BIOS has applied the
+ * workaround (meaning the programmed address falls within
+ * the DSM) and, if so, reserve that part of the DSM to
+ * prevent accidental reuse. The DSM location should be just
+ * below the WOPCM.
+ */
+ u64 gscpsmi_base = intel_uncore_read64_2x32(uncore,
+ MTL_GSCPSMI_BASEADDR_LSB,
+ MTL_GSCPSMI_BASEADDR_MSB);
+ if (gscpsmi_base >= i915->dsm.stolen.start &&
+ gscpsmi_base < i915->dsm.stolen.end) {
+ *base = gscpsmi_base;
+ *size = i915->dsm.stolen.end - gscpsmi_base;
+ return;
+ }
+ }
+
switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
case GEN8_STOLEN_RESERVED_1M:
*size = 1024 * 1024;
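
The Wa_14019821291 hunk above reserves everything from the BIOS-programmed GSCPSMI base up to the end of the DSM. A standalone sketch of that range check, with hypothetical example values: a DSM spanning [0x80000000, 0x84000000) and a base of 0x83f00000 would yield a 1 MiB reservation at the top.

#include <stdbool.h>
#include <stdint.h>

static bool reserve_gsc_range(uint64_t dsm_start, uint64_t dsm_end,
			      uint64_t gscpsmi_base,
			      uint64_t *base, uint64_t *size)
{
	/* The BIOS applied the WA only if the base landed inside the DSM. */
	if (gscpsmi_base < dsm_start || gscpsmi_base >= dsm_end)
		return false;

	*base = gscpsmi_base;		/* e.g. 0x83f00000 */
	*size = dsm_end - gscpsmi_base;	/* e.g. 0x100000 == 1 MiB */
	return true;
}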
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 6b9f6cf50bf6..3ff3d8889c6c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -115,7 +115,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
do {
struct page *page;
- GEM_BUG_ON(order > MAX_ORDER);
+ GEM_BUG_ON(order > MAX_PAGE_ORDER);
page = alloc_pages(GFP | __GFP_ZERO, order);
if (!page)
goto err;
@@ -1082,7 +1082,7 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
goto err_unlock;
for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
- u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
+ u32 *ptr = kmap_local_page(i915_gem_object_get_page(obj, n));
if (needs_flush & CLFLUSH_BEFORE)
drm_clflush_virt_range(ptr, PAGE_SIZE);
@@ -1090,12 +1090,12 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
if (ptr[dword] != val) {
pr_err("n=%lu ptr[%u]=%u, val=%u\n",
n, dword, ptr[dword], val);
- kunmap_atomic(ptr);
+ kunmap_local(ptr);
err = -EINVAL;
break;
}
- kunmap_atomic(ptr);
+ kunmap_local(ptr);
}
i915_gem_object_finish_access(obj);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 3bef1beec7cb..2a0c0634d446 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -24,7 +24,6 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
{
unsigned int needs_clflush;
struct page *page;
- void *map;
u32 *cpu;
int err;
@@ -34,8 +33,7 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
goto out;
page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
- map = kmap_atomic(page);
- cpu = map + offset_in_page(offset);
+ cpu = kmap_local_page(page) + offset_in_page(offset);
if (needs_clflush & CLFLUSH_BEFORE)
drm_clflush_virt_range(cpu, sizeof(*cpu));
@@ -45,7 +43,7 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
if (needs_clflush & CLFLUSH_AFTER)
drm_clflush_virt_range(cpu, sizeof(*cpu));
- kunmap_atomic(map);
+ kunmap_local(cpu);
i915_gem_object_finish_access(ctx->obj);
out:
@@ -57,7 +55,6 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
{
unsigned int needs_clflush;
struct page *page;
- void *map;
u32 *cpu;
int err;
@@ -67,15 +64,14 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
goto out;
page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
- map = kmap_atomic(page);
- cpu = map + offset_in_page(offset);
+ cpu = kmap_local_page(page) + offset_in_page(offset);
if (needs_clflush & CLFLUSH_BEFORE)
drm_clflush_virt_range(cpu, sizeof(*cpu));
*v = *cpu;
- kunmap_atomic(map);
+ kunmap_local(cpu);
i915_gem_object_finish_access(ctx->obj);
out:
@@ -85,6 +81,7 @@ out:
static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
u32 __iomem *map;
int err = 0;
@@ -99,7 +96,7 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@@ -112,12 +109,13 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
i915_vma_unpin_iomap(vma);
out_rpm:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
u32 __iomem *map;
int err = 0;
@@ -132,7 +130,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@@ -145,7 +143,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
i915_vma_unpin_iomap(vma);
out_rpm:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 7021b6e9b219..89d4dc8b60c6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -489,12 +489,12 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
for (n = 0; n < real_page_count(obj); n++) {
u32 *map;
- map = kmap_atomic(i915_gem_object_get_page(obj, n));
+ map = kmap_local_page(i915_gem_object_get_page(obj, n));
for (m = 0; m < DW_PER_PAGE; m++)
map[m] = value;
if (!has_llc)
drm_clflush_virt_range(map, PAGE_SIZE);
- kunmap_atomic(map);
+ kunmap_local(map);
}
i915_gem_object_finish_access(obj);
@@ -520,7 +520,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
for (n = 0; n < real_page_count(obj); n++) {
u32 *map, m;
- map = kmap_atomic(i915_gem_object_get_page(obj, n));
+ map = kmap_local_page(i915_gem_object_get_page(obj, n));
if (needs_flush & CLFLUSH_BEFORE)
drm_clflush_virt_range(map, PAGE_SIZE);
@@ -546,7 +546,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
}
out_unmap:
- kunmap_atomic(map);
+ kunmap_local(map);
if (err)
break;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index e57f9390076c..d684a70f2c04 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -504,7 +504,7 @@ static int igt_dmabuf_export_vmap(void *arg)
}
if (memchr_inv(ptr, 0, dmabuf->size)) {
- pr_err("Exported object not initialiased to zero!\n");
+ pr_err("Exported object not initialised to zero!\n");
err = -EINVAL;
goto out;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 72957a36a36b..2c51a2c452fc 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -630,14 +630,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_driver_unregister__shrinker(i915);
- intel_gt_pm_get(to_gt(i915));
+ intel_gt_pm_get_untracked(to_gt(i915));
cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
}
static void restore_retire_worker(struct drm_i915_private *i915)
{
igt_flush_test(i915);
- intel_gt_pm_put(to_gt(i915));
+ intel_gt_pm_put_untracked(to_gt(i915));
i915_gem_driver_register__shrinker(i915);
}
@@ -778,6 +778,7 @@ err_obj:
static int gtt_set(struct drm_i915_gem_object *obj)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
void __iomem *map;
int err = 0;
@@ -786,7 +787,7 @@ static int gtt_set(struct drm_i915_gem_object *obj)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
@@ -798,12 +799,13 @@ static int gtt_set(struct drm_i915_gem_object *obj)
i915_vma_unpin_iomap(vma);
out:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
static int gtt_check(struct drm_i915_gem_object *obj)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
void __iomem *map;
int err = 0;
@@ -812,7 +814,7 @@ static int gtt_check(struct drm_i915_gem_object *obj)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
@@ -828,7 +830,7 @@ static int gtt_check(struct drm_i915_gem_object *obj)
i915_vma_unpin_iomap(vma);
out:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index e199d7dbb876..2b0327cc47c2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -83,7 +83,7 @@ live_context(struct drm_i915_private *i915, struct file *file)
int err;
u32 id;
- pc = proto_context_create(i915, 0);
+ pc = proto_context_create(fpriv, i915, 0);
if (IS_ERR(pc))
return ERR_CAST(pc);
@@ -152,7 +152,7 @@ kernel_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx;
struct i915_gem_proto_context *pc;
- pc = proto_context_create(i915, 0);
+ pc = proto_context_create(NULL, i915, 0);
if (IS_ERR(pc))
return ERR_CAST(pc);
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 9895e18df043..fa46d2308b0e 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -5,6 +5,7 @@
#include <linux/log2.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gen8_ppgtt.h"
@@ -222,6 +223,9 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ if (vm->rsvd.obj)
+ i915_gem_object_put(vm->rsvd.obj);
+
if (intel_vgpu_active(vm->i915))
gen8_ppgtt_notify_vgt(ppgtt, false);
@@ -950,6 +954,41 @@ err_pd:
return ERR_PTR(err);
}
+static int gen8_init_rsvd(struct i915_address_space *vm)
+{
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ /* The memory will be used only by the GPU. */
+ obj = i915_gem_object_create_lmem(i915, PAGE_SIZE,
+ I915_BO_ALLOC_VOLATILE |
+ I915_BO_ALLOC_GPU_ONLY);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (ret)
+ goto unref;
+
+ vm->rsvd.vma = i915_vma_make_unshrinkable(vma);
+ vm->rsvd.obj = obj;
+ vm->total -= vma->node.size;
+ return 0;
+unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
/*
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -1031,6 +1070,10 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
+ err = gen8_init_rsvd(&ppgtt->vm);
+ if (err)
+ goto err_put;
+
return ppgtt;
err_put:
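
gen8_init_rsvd() above pins a single scratch page for the Wa_16018031267 blit target, preferring LMEM with an internal-memory fallback, then shrinks vm->total so userspace can never allocate over it. A sketch of the try-then-fallback shape only; the helper names are hypothetical stubs:

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical two-tier allocation: device-local first, system fallback. */
static void *alloc_lmem(size_t sz)     { (void)sz; return NULL; /* stub */ }
static void *alloc_internal(size_t sz) { return malloc(sz); /* stub */ }

static void *alloc_scratch(size_t sz)
{
	void *b = alloc_lmem(sz);	/* preferred placement */

	if (!b)
		b = alloc_internal(sz);	/* only this failure is fatal */
	return b;
}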
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index ecc990ec1b95..d650beb8ed22 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -28,11 +28,14 @@ static void irq_disable(struct intel_breadcrumbs *b)
static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
+ intel_wakeref_t wakeref;
+
/*
* Since we are waiting on a request, the GPU should be busy
* and should have its own rpm reference.
*/
- if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
+ wakeref = intel_gt_pm_get_if_awake(b->irq_engine->gt);
+ if (GEM_WARN_ON(!wakeref))
return;
/*
@@ -41,7 +44,7 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
* which we can add a new waiter and avoid the cost of re-enabling
* the irq.
*/
- WRITE_ONCE(b->irq_armed, true);
+ WRITE_ONCE(b->irq_armed, wakeref);
/* Requests may have completed before we could enable the interrupt. */
if (!b->irq_enabled++ && b->irq_enable(b))
@@ -61,12 +64,14 @@ static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
+ intel_wakeref_t wakeref = b->irq_armed;
+
GEM_BUG_ON(!b->irq_enabled);
if (!--b->irq_enabled)
b->irq_disable(b);
- WRITE_ONCE(b->irq_armed, false);
- intel_gt_pm_put_async(b->irq_engine->gt);
+ WRITE_ONCE(b->irq_armed, 0);
+ intel_gt_pm_put_async(b->irq_engine->gt, wakeref);
}
static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
index 72dfd3748c4c..bdf09fd67b6e 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include "intel_engine_types.h"
+#include "intel_wakeref.h"
/*
* Rather than have every client wait upon all user interrupts,
@@ -43,7 +44,7 @@ struct intel_breadcrumbs {
spinlock_t irq_lock; /* protects the interrupt from hardirq context */
struct irq_work irq_work; /* for use from inside irq_lock */
unsigned int irq_enabled;
- bool irq_armed;
+ intel_wakeref_t irq_armed;
/* Not all breadcrumbs are attached to physical HW */
intel_engine_mask_t engine_mask;
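
Storing the wakeref in irq_armed instead of a bool means one field encodes both "IRQ armed" and "which PM reference we hold", so the disarm path can hand the exact reference back. A standalone sketch of the flag-as-handle idiom, with hypothetical types:

typedef unsigned long wakeref_t;	/* 0 == not armed, no reference */

struct breadcrumbs {
	wakeref_t irq_armed;
};

static void arm(struct breadcrumbs *b, wakeref_t wf)
{
	b->irq_armed = wf;		/* store the handle, not a bool */
}

static wakeref_t disarm(struct breadcrumbs *b)
{
	wakeref_t wf = b->irq_armed;

	b->irq_armed = 0;		/* clear the armed state... */
	return wf;			/* ...and return the reference */
}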
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index a53b26178f0a..a2f1245741bb 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -6,6 +6,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
+#include "i915_drm_client.h"
#include "i915_drv.h"
#include "i915_trace.h"
@@ -50,6 +51,7 @@ intel_context_create(struct intel_engine_cs *engine)
int intel_context_alloc_state(struct intel_context *ce)
{
+ struct i915_gem_context *ctx;
int err = 0;
if (mutex_lock_interruptible(&ce->pin_mutex))
@@ -66,6 +68,18 @@ int intel_context_alloc_state(struct intel_context *ce)
goto unlock;
set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
+
+ rcu_read_lock();
+ ctx = rcu_dereference(ce->gem_context);
+ if (ctx && !kref_get_unless_zero(&ctx->ref))
+ ctx = NULL;
+ rcu_read_unlock();
+ if (ctx) {
+ if (ctx->client)
+ i915_drm_client_add_context_objects(ctx->client,
+ ce);
+ i915_gem_context_put(ctx);
+ }
}
unlock:
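
intel_context_alloc_state() above uses the standard RCU lookup idiom: dereference under rcu_read_lock() and upgrade to a strong reference with kref_get_unless_zero(), treating a zero refcount as "object already dying". A sketch of that idiom in isolation, on a generic refcounted object:

#include <linux/kref.h>
#include <linux/rcupdate.h>

struct obj {
	struct kref ref;
};

static struct obj *get_obj(struct obj __rcu **slot)
{
	struct obj *o;

	rcu_read_lock();
	o = rcu_dereference(*slot);
	if (o && !kref_get_unless_zero(&o->ref))
		o = NULL;	/* racing with free; treat as already gone */
	rcu_read_unlock();

	return o;		/* caller must drop the reference */
}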
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index a80e3b7c24ff..25564c01507e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -212,7 +212,7 @@ static inline void intel_context_enter(struct intel_context *ce)
return;
ce->ops->enter(ce);
- intel_gt_pm_get(ce->vm->gt);
+ ce->wakeref = intel_gt_pm_get(ce->vm->gt);
}
static inline void intel_context_mark_active(struct intel_context *ce)
@@ -229,7 +229,7 @@ static inline void intel_context_exit(struct intel_context *ce)
if (--ce->active_count)
return;
- intel_gt_pm_put_async(ce->vm->gt);
+ intel_gt_pm_put_async(ce->vm->gt, ce->wakeref);
ce->ops->exit(ce);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index aceaac28a33e..7eccbd70d89f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -17,6 +17,7 @@
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
+#include "intel_wakeref.h"
#include "uc/intel_guc_fwif.h"
@@ -112,6 +113,7 @@ struct intel_context {
u32 ring_size;
struct intel_ring *ring;
struct intel_timeline *timeline;
+ intel_wakeref_t wakeref;
unsigned long flags;
#define CONTEXT_BARRIER_BIT 0
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 4a11219e560e..40687806d22a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -47,7 +47,7 @@
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
-#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
+#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
#define MAX_MMIO_BASES 3
struct engine_info {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 9a527e1f5be6..1a8e2b7db013 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -188,7 +188,7 @@ static void heartbeat(struct work_struct *wrk)
* low latency and no jitter] the chance to naturally
* complete before being preempted.
*/
- attr.priority = 0;
+ attr.priority = I915_PRIORITY_NORMAL;
if (rq->sched.attr.priority >= attr.priority)
attr.priority = I915_PRIORITY_HEARTBEAT;
if (rq->sched.attr.priority >= attr.priority)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index e91fc881dbf1..96bdb93a948d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -63,7 +63,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
ENGINE_TRACE(engine, "\n");
- intel_gt_pm_get(engine->gt);
+ engine->wakeref_track = intel_gt_pm_get(engine->gt);
/* Discard stale context state from across idling */
ce = engine->kernel_context;
@@ -122,6 +122,7 @@ __queue_and_release_pm(struct i915_request *rq,
*/
GEM_BUG_ON(rq->context->active_count != 1);
__intel_gt_pm_get(engine->gt);
+ rq->context->wakeref = intel_wakeref_track(&engine->gt->wakeref);
/*
* We have to serialise all potential retirement paths with our
@@ -285,7 +286,7 @@ static int __engine_park(struct intel_wakeref *wf)
engine->park(engine);
/* While gt calls i915_vma_parked(), we have to break the lock cycle */
- intel_gt_pm_put_async(engine->gt);
+ intel_gt_pm_put_async(engine->gt, engine->wakeref_track);
return 0;
}
@@ -296,7 +297,7 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
- intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops);
+ intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops, engine->name);
intel_engine_init_heartbeat(engine);
intel_gsc_idle_msg_enable(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index fdd4ddd3a978..a8eac59e3779 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -118,9 +118,15 @@
#define CCID_EXTENDED_STATE_RESTORE BIT(2)
#define CCID_EXTENDED_STATE_SAVE BIT(3)
#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */
+#define PER_CTX_BB_FORCE BIT(2)
+#define PER_CTX_BB_VALID BIT(0)
+
#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */
#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
#define ECOSKPD(base) _MMIO((base) + 0x1d0)
+#define XEHP_BLITTER_SCHEDULING_MODE_MASK REG_GENMASK(12, 11)
+#define XEHP_BLITTER_ROUND_ROBIN_MODE \
+ REG_FIELD_PREP(XEHP_BLITTER_SCHEDULING_MODE_MASK, 1)
#define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4)
#define ECO_GATING_CX_ONLY REG_BIT(3)
#define GEN6_BLITTER_FBC_NOTIFY REG_BIT(3)
@@ -257,5 +263,7 @@
#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18)
#define ALNUNIT_CLKGATE_DIS REG_BIT(13)
+#define VDBOX_CGCTL3F1C(base) _MMIO((base) + 0x3f1c)
+#define MFXPIPE_CLKGATE_DIS REG_BIT(3)
#endif /* __INTEL_ENGINE_REGS__ */
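
For reference, the new ECOSKPD field resolves to fixed constants: REG_GENMASK(12, 11) is 0x1800 and REG_FIELD_PREP(0x1800, 1) is 0x800. An illustrative userspace re-implementation of the arithmetic (not the kernel's macros, which add type and constexpr checking; __builtin_ctz assumes GCC/Clang):

#include <assert.h>
#include <stdint.h>

#define GENMASK32(h, l)	   ((~0u >> (31 - (h))) & (~0u << (l)))
#define FIELD_PREP32(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

int main(void)
{
	assert(GENMASK32(12, 11) == 0x1800);		/* scheduling mode mask */
	assert(FIELD_PREP32(0x1800, 1) == 0x0800);	/* round-robin value */
	return 0;
}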
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 8769760257fd..960e6be2042f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -446,7 +446,9 @@ struct intel_engine_cs {
unsigned long serial;
unsigned long wakeref_serial;
+ intel_wakeref_t wakeref_track;
struct intel_wakeref wakeref;
+
struct file *default_state;
struct {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 118164ddbb2e..833987015b8b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -41,12 +41,15 @@ void intel_engine_add_user(struct intel_engine_cs *engine)
llist_add(&engine->uabi_llist, &engine->i915->uabi_engines_llist);
}
-static const u8 uabi_classes[] = {
+#define I915_NO_UABI_CLASS ((u16)(-1))
+
+static const u16 uabi_classes[] = {
[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
+ [OTHER_CLASS] = I915_NO_UABI_CLASS, /* Not exposed to users, no uabi class. */
};
static int engine_cmp(void *priv, const struct list_head *A,
@@ -200,6 +203,7 @@ static void engine_rename(struct intel_engine_cs *engine, const char *name, u16
void intel_engines_driver_register(struct drm_i915_private *i915)
{
+ u16 name_instance, other_instance = 0;
struct legacy_ring ring = {};
struct list_head *it, *next;
struct rb_node **p, *prev;
@@ -216,27 +220,28 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
if (intel_gt_has_unrecoverable_error(engine->gt))
continue; /* ignore incomplete engines */
- /*
- * We don't want to expose the GSC engine to the users, but we
- * still rename it so it is easier to identify in the debug logs
- */
- if (engine->id == GSC0) {
- engine_rename(engine, "gsc", 0);
- continue;
- }
-
GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
engine->uabi_class = uabi_classes[engine->class];
+ if (engine->uabi_class == I915_NO_UABI_CLASS) {
+ name_instance = other_instance++;
+ } else {
+ GEM_BUG_ON(engine->uabi_class >=
+ ARRAY_SIZE(i915->engine_uabi_class_count));
+ name_instance =
+ i915->engine_uabi_class_count[engine->uabi_class]++;
+ }
+ engine->uabi_instance = name_instance;
- GEM_BUG_ON(engine->uabi_class >=
- ARRAY_SIZE(i915->engine_uabi_class_count));
- engine->uabi_instance =
- i915->engine_uabi_class_count[engine->uabi_class]++;
-
- /* Replace the internal name with the final user facing name */
+ /*
+ * Replace the internal name with the final user- and log-facing
+ * name.
+ */
engine_rename(engine,
intel_engine_class_repr(engine->class),
- engine->uabi_instance);
+ name_instance);
+
+ if (engine->uabi_class == I915_NO_UABI_CLASS)
+ continue;
rb_link_node(&engine->uabi_node, prev, p);
rb_insert_color(&engine->uabi_node, &i915->uabi_engines);
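
The uabi class table widens to u16 so OTHER_CLASS engines (such as GSC) can carry the I915_NO_UABI_CLASS sentinel: they still get a log-friendly name from a private instance counter but are skipped when the userspace engine tree is built. A sketch of the sentinel scheme with simplified, hypothetical types:

#include <stdbool.h>
#include <stdint.h>

#define NO_UABI_CLASS ((uint16_t)-1)	/* kept out of the uabi rb-tree */

static bool engine_is_user_visible(uint16_t uabi_class)
{
	return uabi_class != NO_UABI_CLASS;
}

static uint16_t next_instance(uint16_t *class_counts, uint16_t uabi_class,
			      uint16_t *other_count)
{
	/* Hidden engines number from their own private counter. */
	if (uabi_class == NO_UABI_CLASS)
		return (*other_count)++;
	return class_counts[uabi_class]++;
}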
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index e8f42ec6b1b4..42aade0faf2d 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -630,7 +630,7 @@ static void __execlists_schedule_out(struct i915_request * const rq,
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
if (engine->fw_domain && !--engine->fw_active)
intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
- intel_gt_pm_put_async(engine->gt);
+ intel_gt_pm_put_async_untracked(engine->gt);
/*
* If this is part of a virtual engine, its next request may
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 15fc8e4703f4..21a7e3191c18 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -245,16 +245,15 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
gen8_ggtt_invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
- if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc)) {
+ if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc))
guc_ggtt_ct_invalidate(gt);
- } else if (GRAPHICS_VER(i915) >= 12) {
+ else if (GRAPHICS_VER(i915) >= 12)
intel_uncore_write_fw(gt->uncore,
GEN12_GUC_TLB_INV_CR,
GEN12_GUC_TLB_INV_CR_INVALIDATE);
- } else {
+ else
intel_uncore_write_fw(gt->uncore,
GEN8_GTCR, GEN8_GTCR_INVALIDATE);
- }
}
}
@@ -297,7 +296,7 @@ static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
return intel_gt_is_bind_context_ready(gt);
}
-static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
+static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt, intel_wakeref_t *wakeref)
{
struct intel_context *ce;
struct intel_gt *gt = ggtt->vm.gt;
@@ -314,7 +313,8 @@ static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
* would conflict with fs_reclaim trying to allocate memory while
* doing rpm_resume().
*/
- if (!intel_gt_pm_get_if_awake(gt))
+ *wakeref = intel_gt_pm_get_if_awake(gt);
+ if (!*wakeref)
return NULL;
intel_engine_pm_get(ce->engine);
@@ -322,10 +322,10 @@ static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
return ce;
}
-static void gen8_ggtt_bind_put_ce(struct intel_context *ce)
+static void gen8_ggtt_bind_put_ce(struct intel_context *ce, intel_wakeref_t wakeref)
{
intel_engine_pm_put(ce->engine);
- intel_gt_pm_put(ce->engine->gt);
+ intel_gt_pm_put(ce->engine->gt, wakeref);
}
static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
@@ -338,12 +338,13 @@ static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
struct sgt_iter iter;
struct i915_request *rq;
struct intel_context *ce;
+ intel_wakeref_t wakeref;
u32 *cs;
if (!num_entries)
return true;
- ce = gen8_ggtt_bind_get_ce(ggtt);
+ ce = gen8_ggtt_bind_get_ce(ggtt, &wakeref);
if (!ce)
return false;
@@ -419,13 +420,13 @@ queue_err_rq:
offset += n_ptes;
}
- gen8_ggtt_bind_put_ce(ce);
+ gen8_ggtt_bind_put_ce(ce, wakeref);
return true;
err_rq:
i915_request_put(rq);
put_ce:
- gen8_ggtt_bind_put_ce(ce);
+ gen8_ggtt_bind_put_ce(ce, wakeref);
return false;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h
index 7ab3ca0f9f26..013c64251448 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.h
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.h
@@ -21,8 +21,11 @@ struct mei_aux_device;
/**
* struct intel_gsc - graphics security controller
*
- * @gem_obj: scratch memory GSC operations
- * @intf : gsc interface
+ * @intf: gsc interface
+ * @intf.adev: MEI aux. device for this @intf
+ * @intf.gem_obj: scratch memory GSC operations
+ * @intf.irq: IRQ for this device (%-1 for no IRQ)
+ * @intf.id: this interface's id number/index
*/
struct intel_gsc {
struct intel_gsc_intf {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index ba1186fc524f..a425db5ed3a2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -451,7 +451,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
spin_lock_irqsave(&uncore->lock, flags);
intel_uncore_posting_read_fw(uncore,
- RING_HEAD(RENDER_RING_BASE));
+ RING_TAIL(RENDER_RING_BASE));
spin_unlock_irqrestore(&uncore->lock, flags);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index e1f13735f530..608f5c872928 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -82,6 +82,10 @@ struct drm_printer;
##__VA_ARGS__); \
} while (0)
+#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
+ IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 55), IP_VER(12, 71)) && \
+ engine->class == COPY_ENGINE_CLASS && engine->instance == 0)
+
static inline bool gt_is_root(struct intel_gt *gt)
{
return !gt->info.id;
@@ -114,6 +118,11 @@ static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
return container_of(gsc, struct intel_gt, gsc);
}
+static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
+{
+ return guc_to_gt(guc)->i915;
+}
+
void intel_gt_common_init_early(struct intel_gt *gt);
int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index 34913912d8ae..e253750a51c5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -388,8 +388,7 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
* registers. This wakeref will be released in the unlock
* routine.
*
- * This is expected to become a formally documented/numbered
- * workaround soon.
+ * Wa_22018931422
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_GT);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index f5899d503e23..220ac4f92edf 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -28,19 +28,20 @@
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
int count = atomic_read(&gt->user_wakeref);
+ intel_wakeref_t wakeref;
/* Inside suspend/resume so single threaded, no races to worry about. */
if (likely(!count))
return;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
if (suspend) {
GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
atomic_sub(count, &gt->wakeref.count);
} else {
atomic_add(count, &gt->wakeref.count);
}
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
}
static void runtime_begin(struct intel_gt *gt)
@@ -138,7 +139,7 @@ void intel_gt_pm_init_early(struct intel_gt *gt)
* runtime_pm is per-device rather than per-tile, so this is still the
* correct structure.
*/
- intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops);
+ intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops, "GT");
seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
@@ -167,7 +168,7 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
enum intel_engine_id id;
intel_wakeref_t wakeref;
- GT_TRACE(gt, "force:%s", str_yes_no(force));
+ GT_TRACE(gt, "force:%s\n", str_yes_no(force));
/* Use a raw wakeref to avoid calling intel_display_power_get early */
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
@@ -236,6 +237,7 @@ int intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err;
err = intel_gt_has_unrecoverable_error(gt);
@@ -252,7 +254,7 @@ int intel_gt_resume(struct intel_gt *gt)
*/
gt_sanitize(gt, true);
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
intel_rc6_sanitize(&gt->rc6);
@@ -295,7 +297,7 @@ int intel_gt_resume(struct intel_gt *gt)
out_fw:
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
intel_gt_bind_context_set_ready(gt);
return err;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index b1eeb5b33918..911fd0160221 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -16,19 +16,28 @@ static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
return intel_wakeref_is_active(&gt->wakeref);
}
-static inline void intel_gt_pm_get(struct intel_gt *gt)
+static inline void intel_gt_pm_get_untracked(struct intel_gt *gt)
{
intel_wakeref_get(&gt->wakeref);
}
+static inline intel_wakeref_t intel_gt_pm_get(struct intel_gt *gt)
+{
+ intel_gt_pm_get_untracked(gt);
+ return intel_wakeref_track(&gt->wakeref);
+}
+
static inline void __intel_gt_pm_get(struct intel_gt *gt)
{
__intel_wakeref_get(&gt->wakeref);
}
-static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
+static inline intel_wakeref_t intel_gt_pm_get_if_awake(struct intel_gt *gt)
{
- return intel_wakeref_get_if_active(&gt->wakeref);
+ if (!intel_wakeref_get_if_active(&gt->wakeref))
+ return 0;
+
+ return intel_wakeref_track(&gt->wakeref);
}
static inline void intel_gt_pm_might_get(struct intel_gt *gt)
@@ -36,12 +45,18 @@ static inline void intel_gt_pm_might_get(struct intel_gt *gt)
intel_wakeref_might_get(&gt->wakeref);
}
-static inline void intel_gt_pm_put(struct intel_gt *gt)
+static inline void intel_gt_pm_put_untracked(struct intel_gt *gt)
{
intel_wakeref_put(&gt->wakeref);
}
-static inline void intel_gt_pm_put_async(struct intel_gt *gt)
+static inline void intel_gt_pm_put(struct intel_gt *gt, intel_wakeref_t handle)
+{
+ intel_wakeref_untrack(&gt->wakeref, handle);
+ intel_gt_pm_put_untracked(gt);
+}
+
+static inline void intel_gt_pm_put_async_untracked(struct intel_gt *gt)
{
intel_wakeref_put_async(&gt->wakeref);
}
@@ -51,9 +66,14 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
intel_wakeref_might_put(&gt->wakeref);
}
-#define with_intel_gt_pm(gt, tmp) \
- for (tmp = 1, intel_gt_pm_get(gt); tmp; \
- intel_gt_pm_put(gt), tmp = 0)
+static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t handle)
+{
+ intel_wakeref_untrack(&gt->wakeref, handle);
+ intel_gt_pm_put_async_untracked(gt);
+}
+
+#define with_intel_gt_pm(gt, wf) \
+ for (wf = intel_gt_pm_get(gt); wf; intel_gt_pm_put(gt, wf), wf = 0)
/**
* with_intel_gt_pm_if_awake - if GT is PM awake, get a reference to prevent
@@ -64,7 +84,7 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
* @wf: pointer to a temporary wakeref.
*/
#define with_intel_gt_pm_if_awake(gt, wf) \
- for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
+ for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt, wf), wf = 0)
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
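
The hunks above rework the GT power-management API so intel_gt_pm_get() returns an intel_wakeref_t handle that must be fed back into the matching intel_gt_pm_put(), letting the wakeref tracker pair acquires with releases; the _untracked variants keep the old behaviour for callers that cannot carry a handle. A minimal usage sketch against the new API, where do_hw_access() is a hypothetical placeholder for real GT work:

#include "intel_gt_pm.h"

static int do_hw_access(struct intel_gt *gt);	/* hypothetical helper */

static int do_work_awake(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;
	int err;

	wakeref = intel_gt_pm_get(gt);	/* tracked acquire, returns handle */
	err = do_hw_access(gt);
	intel_gt_pm_put(gt, wakeref);	/* handle pairs the put with its get */

	return err;
}

The updated with_intel_gt_pm(gt, wf) macro wraps the same get/put pairing around a loop body.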
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index f900cc68d6d9..7114c116e928 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -27,7 +27,7 @@
void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
{
atomic_inc(&gt->user_wakeref);
- intel_gt_pm_get(gt);
+ intel_gt_pm_get_untracked(gt);
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_get(gt->uncore);
}
@@ -36,7 +36,7 @@ void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
{
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_put(gt->uncore);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put_untracked(gt);
atomic_dec(&gt->user_wakeref);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index eecd0a87a647..50962cfd1353 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -469,6 +469,9 @@
#define XEHP_PSS_MODE2 MCR_REG(0x703c)
#define SCOREBOARD_STALL_FLUSH_CONTROL REG_BIT(5)
+#define XEHP_PSS_CHICKEN MCR_REG(0x7044)
+#define FD_END_COLLECT REG_BIT(5)
+
#define GEN7_SC_INSTDONE _MMIO(0x7100)
#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104)
#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
@@ -537,6 +540,9 @@
#define XEHP_SQCM MCR_REG(0x8724)
#define EN_32B_ACCESS REG_BIT(30)
+#define MTL_GSCPSMI_BASEADDR_LSB _MMIO(0x880c)
+#define MTL_GSCPSMI_BASEADDR_MSB _MMIO(0x8810)
+
#define HSW_IDICR _MMIO(0x9008)
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 4fbed27ef0ec..86f73fe558ca 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -63,6 +63,9 @@ struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
if (!IS_ERR(obj)) {
obj->base.resv = i915_vm_resv_get(vm);
obj->shares_resv_from = vm;
+
+ if (vm->fpriv)
+ i915_drm_client_add_object(vm->fpriv->client, obj);
}
return obj;
@@ -84,6 +87,9 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
if (!IS_ERR(obj)) {
obj->base.resv = i915_vm_resv_get(vm);
obj->shares_resv_from = vm;
+
+ if (vm->fpriv)
+ i915_drm_client_add_object(vm->fpriv->client, obj);
}
return obj;
@@ -95,6 +101,16 @@ int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
void *vaddr;
type = intel_gt_coherent_map_type(vm->gt, obj, true);
+ /*
+ * FIXME: An Address Translation Service (ATS) issue in the IOMMU is
+ * suspected of causing CAT errors on some MTL workloads.
+ * Applying a write barrier to the ppgtt set entry functions appeared
+ * to have no effect, so we must temporarily use I915_MAP_WC here on
+ * MTL until a proper ATS solution is found.
+ */
+ if (IS_METEORLAKE(vm->i915))
+ type = I915_MAP_WC;
+
vaddr = i915_gem_object_pin_map_unlocked(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -109,6 +125,16 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object
void *vaddr;
type = intel_gt_coherent_map_type(vm->gt, obj, true);
+ /*
+ * FIXME: An Address Translation Service (ATS) issue in the IOMMU is
+ * suspected of causing CAT errors on some MTL workloads.
+ * Applying a write barrier to the ppgtt set entry functions appeared
+ * to have no effect, so we must temporarily use I915_MAP_WC here on
+ * MTL until a proper ATS solution is found.
+ */
+ if (IS_METEORLAKE(vm->i915))
+ type = I915_MAP_WC;
+
vaddr = i915_gem_object_pin_map(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index b471edac2699..6b85222ee3ea 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -249,8 +249,13 @@ struct i915_address_space {
struct work_struct release_work;
struct drm_mm mm;
+ struct {
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ } rsvd;
struct intel_gt *gt;
struct drm_i915_private *i915;
+ struct drm_i915_file_private *fpriv;
struct device *dma;
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
u64 reserved; /* size addr space reserved */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index eaf66d903166..7c367ba8d9dc 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -829,6 +829,18 @@ lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
}
static void
+lrc_setup_bb_per_ctx(u32 *regs,
+ const struct intel_engine_cs *engine,
+ u32 ctx_bb_ggtt_addr)
+{
+ GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
+ regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
+ ctx_bb_ggtt_addr |
+ PER_CTX_BB_FORCE |
+ PER_CTX_BB_VALID;
+}
+
+static void
lrc_setup_indirect_ctx(u32 *regs,
const struct intel_engine_cs *engine,
u32 ctx_bb_ggtt_addr,
@@ -1020,7 +1032,13 @@ static u32 context_wa_bb_offset(const struct intel_context *ce)
return PAGE_SIZE * ce->wa_bb_page;
}
-static u32 *context_indirect_bb(const struct intel_context *ce)
+/*
+ * per_ctx below determines which WABB section is used.
+ * When true, the function returns the location of the
+ * PER_CTX_BB. When false, the function returns the
+ * location of the INDIRECT_CTX.
+ */
+static u32 *context_wabb(const struct intel_context *ce, bool per_ctx)
{
void *ptr;
@@ -1029,6 +1047,7 @@ static u32 *context_indirect_bb(const struct intel_context *ce)
ptr = ce->lrc_reg_state;
ptr -= LRC_STATE_OFFSET; /* back to start of context image */
ptr += context_wa_bb_offset(ce);
+ ptr += per_ctx ? PAGE_SIZE : 0;
return ptr;
}
@@ -1105,7 +1124,8 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
if (GRAPHICS_VER(engine->i915) >= 12) {
ce->wa_bb_page = context_size / PAGE_SIZE;
- context_size += PAGE_SIZE;
+ /* INDIRECT_CTX and PER_CTX_BB need separate pages. */
+ context_size += PAGE_SIZE * 2;
}
if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) {
@@ -1407,12 +1427,85 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
return gen12_emit_aux_table_inv(ce->engine, cs);
}
+static u32 *xehp_emit_fastcolor_blt_wabb(const struct intel_context *ce, u32 *cs)
+{
+ struct intel_gt *gt = ce->engine->gt;
+ int mocs = gt->mocs.uc_index << 1;
+
+ /*
+ * Wa_16018031267 / Wa_16018063123 requires that SW force the
+ * main copy engine arbitration into round-robin mode. We
+ * additionally need to submit the following WABB blt command,
+ * which produces 4 subblits, each generating a 0-byte write
+ * request, as the WABB:
+ *
+ * XY_FASTCOLOR_BLT
+ * BG0 -> 5100000E
+ * BG1 -> 0000003F (Dest pitch)
+ * BG2 -> 00000000 (X1, Y1) = (0, 0)
+ * BG3 -> 00040001 (X2, Y2) = (1, 4)
+ * BG4 -> scratch
+ * BG5 -> scratch
+ * BG6-12 -> 00000000
+ * BG13 -> 20004004 (Surf. Width = 2, Surf. Height = 5)
+ * BG14 -> 00000010 (Qpitch = 4)
+ * BG15 -> 00000000
+ */
+ *cs++ = XY_FAST_COLOR_BLT_CMD | (16 - 2);
+ *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) | 0x3f;
+ *cs++ = 0;
+ *cs++ = 4 << 16 | 1;
+ *cs++ = lower_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
+ *cs++ = upper_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0x20004004;
+ *cs++ = 0x10;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *
+xehp_emit_per_ctx_bb(const struct intel_context *ce, u32 *cs)
+{
+ /* Wa_16018031267, Wa_16018063123 */
+ if (NEEDS_FASTCOLOR_BLT_WABB(ce->engine))
+ cs = xehp_emit_fastcolor_blt_wabb(ce, cs);
+
+ return cs;
+}
+
+static void
+setup_per_ctx_bb(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 *(*emit)(const struct intel_context *, u32 *))
+{
+ /* Place the PER_CTX_BB on the page after the INDIRECT_CTX */
+ u32 * const start = context_wabb(ce, true);
+ u32 *cs;
+
+ cs = emit(ce, start);
+
+ /* The PER_CTX_BB must be terminated manually */
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
+ lrc_setup_bb_per_ctx(ce->lrc_reg_state, engine,
+ lrc_indirect_bb(ce) + PAGE_SIZE);
+}
+
static void
setup_indirect_ctx_bb(const struct intel_context *ce,
const struct intel_engine_cs *engine,
u32 *(*emit)(const struct intel_context *, u32 *))
{
- u32 * const start = context_indirect_bb(ce);
+ u32 * const start = context_wabb(ce, false);
u32 *cs;
cs = emit(ce, start);
@@ -1511,6 +1604,7 @@ u32 lrc_update_regs(const struct intel_context *ce,
/* Mutually exclusive wrt to global indirect bb */
GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
setup_indirect_ctx_bb(ce, engine, fn);
+ setup_per_ctx_bb(ce, engine, xehp_emit_per_ctx_bb);
}
return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
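
After this change each gen12+ context allocates two WA batch pages instead of one, selected by context_wabb(ce, per_ctx). A layout sketch of the arrangement the hunks above set up:

/*
 * Per-context LRC backing store, in page order (gen12+):
 *
 *   [ ... lrc_reg_state ... ]
 *   [ wa_bb_page     ]  INDIRECT_CTX batch (size programmed via
 *                       lrc_setup_indirect_ctx())
 *   [ wa_bb_page + 1 ]  PER_CTX_BB batch (no programmed size, so it
 *                       must end with MI_BATCH_BUFFER_END)
 */

static inline unsigned long wabb_offset(unsigned long wa_bb_offset,
					int per_ctx)
{
	/* The PER_CTX_BB lives one page past the INDIRECT_CTX. */
	return wa_bb_offset + (per_ctx ? 4096 : 0);
}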
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index d5ed904f355d..6801f8b95c53 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1293,7 +1293,7 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
if (msg)
drm_notice(&engine->i915->drm,
"Resetting %s for %s\n", engine->name, msg);
- atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
+ i915_increase_reset_engine_count(&engine->i915->gpu_error, engine);
ret = intel_gt_reset_engine(engine);
if (ret) {
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index f602895f6d0d..6a3246240e81 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -849,13 +849,12 @@ void intel_sseu_print_topology(struct drm_i915_private *i915,
const struct sseu_dev_info *sseu,
struct drm_printer *p)
{
- if (sseu->max_slices == 0) {
+ if (sseu->max_slices == 0)
drm_printf(p, "Unavailable\n");
- } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
sseu_print_xehp_topology(sseu, p);
- } else {
+ else
sseu_print_hsw_topology(sseu, p);
- }
}
void intel_sseu_print_ss_info(const char *type,
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 192ac0e59afa..3eacbc50caf8 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -777,6 +777,9 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_18019271663:dg2 */
wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
+
+ /* Wa_14019877138:dg2 */
+ wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
}
static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
@@ -1663,8 +1666,22 @@ xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
+wa_16021867713(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct intel_engine_cs *engine;
+ int id;
+
+ for_each_engine(engine, gt, id)
+ if (engine->class == VIDEO_DECODE_CLASS)
+ wa_write_or(wal, VDBOX_CGCTL3F1C(engine->mmio_base),
+ MFXPIPE_CLKGATE_DIS);
+}
+
+static void
xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
+ wa_16021867713(gt, wal);
+
/*
* Wa_14018778641
* Wa_18018781329
@@ -1674,6 +1691,9 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
*/
wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
+ /* Wa_22016670082 */
+ wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
+
debug_dump_steering(gt);
}
@@ -2340,14 +2360,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
0, true);
}
- if (IS_DG2_G11(i915) || IS_DG2_G10(i915)) {
- /* Wa_22014600077:dg2 */
- wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH),
- 0 /* Wa_14012342262 write-only reg, so skip verification */,
- true);
- }
-
if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/*
@@ -2782,6 +2794,11 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
}
+ /* Wa_16018031267, Wa_16018063123 */
+ if (NEEDS_FASTCOLOR_BLT_WABB(engine))
+ wa_masked_field_set(wal, ECOSKPD(engine->mmio_base),
+ XEHP_BLITTER_SCHEDULING_MODE_MASK,
+ XEHP_BLITTER_ROUND_ROBIN_MODE);
}
static void
@@ -2915,6 +2932,9 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
* Wa_22015475538:dg2
*/
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
+
+ /* Wa_18028616096 */
+ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
}
if (IS_DG2_G11(i915)) {
@@ -2943,11 +2963,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
true);
}
- if (IS_DG2_G10(i915) || IS_DG2_G12(i915)) {
- /* Wa_18028616096 */
- wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
- }
-
if (IS_XEHPSDV(i915)) {
/* Wa_1409954639 */
wa_mcr_masked_en(wal,
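
Most entries added in this file use the wa_write_or()/wa_mcr_write_or() family: OR a bit into a register, record it in the wa list so it is re-applied after resets, and verify it stayed set. A toy model of that record/apply/verify split, with simplified hypothetical types:

#include <stdbool.h>
#include <stdint.h>

struct wa_entry {
	uint32_t reg;		/* register byte offset */
	uint32_t set;		/* bits that must be set */
};

static void wa_apply(uint32_t *mmio, const struct wa_entry *w)
{
	mmio[w->reg / 4] |= w->set;		/* read-modify-write OR */
}

static bool wa_verify(const uint32_t *mmio, const struct wa_entry *w)
{
	return (mmio[w->reg / 4] & w->set) == w->set;
}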
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 86cecf7a1105..5ffa5e30f419 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -21,20 +21,22 @@ static int cmp_u32(const void *A, const void *B)
return *a - *b;
}
-static void perf_begin(struct intel_gt *gt)
+static intel_wakeref_t perf_begin(struct intel_gt *gt)
{
- intel_gt_pm_get(gt);
+ intel_wakeref_t wakeref = intel_gt_pm_get(gt);
/* Boost gpufreq to max [waitboost] and keep it fixed */
atomic_inc(&gt->rps.num_waiters);
queue_work(gt->i915->unordered_wq, &gt->rps.work);
flush_work(&gt->rps.work);
+
+ return wakeref;
}
-static int perf_end(struct intel_gt *gt)
+static int perf_end(struct intel_gt *gt, intel_wakeref_t wakeref)
{
atomic_dec(&gt->rps.num_waiters);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return igt_flush_test(gt->i915);
}
@@ -133,12 +135,13 @@ static int perf_mi_bb_start(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
- perf_begin(gt);
+ wakeref = perf_begin(gt);
for_each_engine(engine, gt, id) {
struct intel_context *ce = engine->kernel_context;
struct i915_vma *batch;
@@ -207,7 +210,7 @@ out:
pr_info("%s: MI_BB_START cycles: %u\n",
engine->name, trifilter(cycles));
}
- if (perf_end(gt))
+ if (perf_end(gt, wakeref))
err = -EIO;
return err;
@@ -260,12 +263,13 @@ static int perf_mi_noop(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
- perf_begin(gt);
+ wakeref = perf_begin(gt);
for_each_engine(engine, gt, id) {
struct intel_context *ce = engine->kernel_context;
struct i915_vma *base, *nop;
@@ -364,7 +368,7 @@ out:
pr_info("%s: 16K MI_NOOP cycles: %u\n",
engine->name, trifilter(cycles));
}
- if (perf_end(gt))
+ if (perf_end(gt, wakeref))
err = -EIO;
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 273d440a53e3..bc441ce7b380 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -84,7 +84,7 @@ static struct pulse *pulse_create(void)
static void pulse_unlock_wait(struct pulse *p)
{
- i915_active_unlock_wait(&p->active);
+ wait_var_event_timeout(&p->active, i915_active_is_idle(&p->active), HZ);
}
static int __live_idle_pulse(struct intel_engine_cs *engine,
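wait_var_event_timeout() pairs with wake_up_var() on the same address: the waiter re-checks the condition on every wakeup and gives up after the timeout (HZ jiffies, i.e. one second, above). A generic sketch of the pairing, independent of this selftest (the busy flag is illustrative):

/* waiter: sleep until the flag clears or one second passes */
if (!wait_var_event_timeout(&obj->busy, !READ_ONCE(obj->busy), HZ))
	pr_warn("timed out waiting for idle\n");

/* wake side: update the variable first, then kick waiters on its address */
WRITE_ONCE(obj->busy, false);
wake_up_var(&obj->busy);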
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 0971241707ce..33351deeea4f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -81,6 +81,7 @@ static int live_gt_clocks(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
if (!gt->clock_frequency) { /* unknown */
@@ -91,7 +92,7 @@ static int live_gt_clocks(void *arg)
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
for_each_engine(engine, gt, id) {
@@ -128,7 +129,7 @@ static int live_gt_clocks(void *arg)
}
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 5f826b6dcf5d..e17b8777d21d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1555,7 +1555,7 @@ static int live_lrc_isolation(void *arg)
return err;
}
-static int indirect_ctx_submit_req(struct intel_context *ce)
+static int wabb_ctx_submit_req(struct intel_context *ce)
{
struct i915_request *rq;
int err = 0;
@@ -1579,7 +1579,8 @@ static int indirect_ctx_submit_req(struct intel_context *ce)
#define CTX_BB_CANARY_INDEX (CTX_BB_CANARY_OFFSET / sizeof(u32))
static u32 *
-emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+emit_wabb_ctx_canary(const struct intel_context *ce,
+ u32 *cs, bool per_ctx)
{
*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT |
@@ -1587,26 +1588,43 @@ emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
*cs++ = i915_mmio_reg_offset(RING_START(0));
*cs++ = i915_ggtt_offset(ce->state) +
context_wa_bb_offset(ce) +
- CTX_BB_CANARY_OFFSET;
+ CTX_BB_CANARY_OFFSET +
+ (per_ctx ? PAGE_SIZE : 0);
*cs++ = 0;
return cs;
}
+static u32 *
+emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+{
+ return emit_wabb_ctx_canary(ce, cs, false);
+}
+
+static u32 *
+emit_per_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+{
+ return emit_wabb_ctx_canary(ce, cs, true);
+}
+
static void
-indirect_ctx_bb_setup(struct intel_context *ce)
+wabb_ctx_setup(struct intel_context *ce, bool per_ctx)
{
- u32 *cs = context_indirect_bb(ce);
+ u32 *cs = context_wabb(ce, per_ctx);
cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;
- setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
+ if (per_ctx)
+ setup_per_ctx_bb(ce, ce->engine, emit_per_ctx_bb_canary);
+ else
+ setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
}
-static bool check_ring_start(struct intel_context *ce)
+static bool check_ring_start(struct intel_context *ce, bool per_ctx)
{
const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
- LRC_STATE_OFFSET + context_wa_bb_offset(ce);
+ LRC_STATE_OFFSET + context_wa_bb_offset(ce) +
+ (per_ctx ? PAGE_SIZE : 0);
if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
return true;
@@ -1618,21 +1636,21 @@ static bool check_ring_start(struct intel_context *ce)
return false;
}
-static int indirect_ctx_bb_check(struct intel_context *ce)
+static int wabb_ctx_check(struct intel_context *ce, bool per_ctx)
{
int err;
- err = indirect_ctx_submit_req(ce);
+ err = wabb_ctx_submit_req(ce);
if (err)
return err;
- if (!check_ring_start(ce))
+ if (!check_ring_start(ce, per_ctx))
return -EINVAL;
return 0;
}
-static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
+static int __lrc_wabb_ctx(struct intel_engine_cs *engine, bool per_ctx)
{
struct intel_context *a, *b;
int err;
@@ -1667,14 +1685,14 @@ static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
* As ring start is restored prior to starting the indirect ctx bb and
* as it will be different for each context, it fits this purpose.
*/
- indirect_ctx_bb_setup(a);
- indirect_ctx_bb_setup(b);
+ wabb_ctx_setup(a, per_ctx);
+ wabb_ctx_setup(b, per_ctx);
- err = indirect_ctx_bb_check(a);
+ err = wabb_ctx_check(a, per_ctx);
if (err)
goto unpin_b;
- err = indirect_ctx_bb_check(b);
+ err = wabb_ctx_check(b, per_ctx);
unpin_b:
intel_context_unpin(b);
@@ -1688,7 +1706,7 @@ put_a:
return err;
}
-static int live_lrc_indirect_ctx_bb(void *arg)
+static int lrc_wabb_ctx(void *arg, bool per_ctx)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
@@ -1697,7 +1715,7 @@ static int live_lrc_indirect_ctx_bb(void *arg)
for_each_engine(engine, gt, id) {
intel_engine_pm_get(engine);
- err = __live_lrc_indirect_ctx_bb(engine);
+ err = __lrc_wabb_ctx(engine, per_ctx);
intel_engine_pm_put(engine);
if (igt_flush_test(gt->i915))
@@ -1710,6 +1728,16 @@ static int live_lrc_indirect_ctx_bb(void *arg)
return err;
}
+static int live_lrc_indirect_ctx_bb(void *arg)
+{
+ return lrc_wabb_ctx(arg, false);
+}
+
+static int live_lrc_per_ctx_bb(void *arg)
+{
+ return lrc_wabb_ctx(arg, true);
+}
+
static void garbage_reset(struct intel_engine_cs *engine,
struct i915_request *rq)
{
@@ -1947,6 +1975,7 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_lrc_garbage),
SUBTEST(live_pphwsp_runtime),
SUBTEST(live_lrc_indirect_ctx_bb),
+ SUBTEST(live_lrc_per_ctx_bb),
};
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
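The PAGE_SIZE term threading through this test encodes the layout the two workaround batch buffers share: the indirect ctx bb occupies the first page of the region and the per-ctx bb the page after it. Expressed as a helper, for clarity (a sketch; context_wa_bb_offset() is the driver's own accessor):

static u32 wabb_offset(const struct intel_context *ce, bool per_ctx)
{
	/* page 0: indirect ctx bb; page 1: per-ctx bb */
	return context_wa_bb_offset(ce) + (per_ctx ? PAGE_SIZE : 0);
}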
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 79aa6ac66ad2..f40de408cd3a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -261,11 +261,12 @@ static int igt_atomic_reset(void *arg)
{
struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
+ intel_wakeref_t wakeref;
int err = 0;
/* Check that the resets are usable from atomic context */
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
@@ -296,7 +297,7 @@ static int igt_atomic_reset(void *arg)
unlock:
igt_global_reset_unlock(gt);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return err;
}
@@ -307,6 +308,7 @@ static int igt_atomic_engine_reset(void *arg)
const typeof(*igt_atomic_phases) *p;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
/* Check that the resets are usable from atomic context */
@@ -317,7 +319,7 @@ static int igt_atomic_engine_reset(void *arg)
if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
@@ -365,7 +367,7 @@ static int igt_atomic_engine_reset(void *arg)
out_unlock:
igt_global_reset_unlock(gt);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index fb30f733b036..dcef8d498919 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -224,6 +224,7 @@ int live_rps_clock_interval(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
int err = 0;
if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
@@ -236,7 +237,7 @@ int live_rps_clock_interval(void *arg)
saved_work = rps->work.func;
rps->work.func = dummy_rps_work;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
intel_rps_disable(&gt->rps);
intel_gt_check_clock_frequency(gt);
@@ -355,7 +356,7 @@ int live_rps_clock_interval(void *arg)
}
intel_rps_enable(&gt->rps);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
@@ -376,6 +377,7 @@ int live_rps_control(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
int err = 0;
/*
@@ -398,7 +400,7 @@ int live_rps_control(void *arg)
saved_work = rps->work.func;
rps->work.func = dummy_rps_work;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
for_each_engine(engine, gt, id) {
struct i915_request *rq;
ktime_t min_dt, max_dt;
@@ -488,7 +490,7 @@ int live_rps_control(void *arg)
break;
}
}
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
@@ -1023,6 +1025,7 @@ int live_rps_interrupt(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
u32 pm_events;
int err = 0;
@@ -1033,9 +1036,9 @@ int live_rps_interrupt(void *arg)
if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6)
return 0;
- intel_gt_pm_get(gt);
- pm_events = rps->pm_events;
- intel_gt_pm_put(gt);
+ pm_events = 0;
+ with_intel_gt_pm(gt, wakeref)
+ pm_events = rps->pm_events;
if (!pm_events) {
pr_err("No RPS PM events registered, but RPS is enabled?\n");
return -ENODEV;
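with_intel_gt_pm() gives the same get/put pairing a scoped form: the wakeref is acquired on entry to the statement and released when its body completes, so the cookie cannot leak past the block. Usage as in the hunk above:

intel_wakeref_t wakeref;
u32 pm_events = 0;

with_intel_gt_pm(gt, wakeref)
	pm_events = rps->pm_events;	/* body runs with the GT held awake */
/* roughly: wakeref = intel_gt_pm_get(gt); <body>; intel_gt_pm_put(gt, wakeref); */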
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index 952c8d52d68a..302d0540295d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -266,6 +266,7 @@ static int run_test(struct intel_gt *gt, int test_type)
struct intel_rps *rps = &gt->rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
struct igt_spinner spin;
u32 slpc_min_freq, slpc_max_freq;
int err = 0;
@@ -311,7 +312,7 @@ static int run_test(struct intel_gt *gt, int test_type)
}
intel_gt_pm_wait_for_idle(gt);
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
for_each_engine(engine, gt, id) {
struct i915_request *rq;
u32 max_act_freq;
@@ -397,7 +398,7 @@ static int run_test(struct intel_gt *gt, int test_type)
if (igt_flush_test(gt->i915))
err = -EIO;
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
intel_gt_pm_wait_for_idle(gt);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
index 5f138de3c14f..40817ebcca71 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
@@ -322,6 +322,7 @@ static int i915_gsc_proxy_component_bind(struct device *i915_kdev,
gsc->proxy.component = data;
gsc->proxy.component->mei_dev = mei_kdev;
mutex_unlock(&gsc->proxy.mutex);
+ gt_dbg(gt, "GSC proxy mei component bound\n");
return 0;
}
@@ -342,6 +343,7 @@ static void i915_gsc_proxy_component_unbind(struct device *i915_kdev,
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE),
HECI_H_CSR_IE | HECI_H_CSR_RST, 0);
+ gt_dbg(gt, "GSC proxy mei component unbound\n");
}
static const struct component_ops i915_gsc_proxy_component_ops = {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 3f3df1166b86..2b450c43bbd7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -330,7 +330,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
static u32 guc_ctl_devid(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
}
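guc_to_i915() collapses the guc_to_gt(guc)->i915 chain that this series replaces throughout the GuC code. Its likely shape, next to the existing guc_to_gt() helper (a sketch inferred from the call sites):

static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
{
	return guc_to_gt(guc)->i915;
}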
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 2b6dfe62c8f2..813cc888e6fa 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -105,61 +105,67 @@ struct intel_guc {
*/
struct {
/**
- * @lock: protects everything in submission_state,
- * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
- * out of zero
+ * @submission_state.lock: protects everything in
+ * submission_state, ce->guc_id.id, and ce->guc_id.ref
+ * when transitioning in and out of zero
*/
spinlock_t lock;
/**
- * @guc_ids: used to allocate new guc_ids, single-lrc
+ * @submission_state.guc_ids: used to allocate new
+ * guc_ids, single-lrc
*/
struct ida guc_ids;
/**
- * @num_guc_ids: Number of guc_ids, selftest feature to be able
- * to reduce this number while testing.
+ * @submission_state.num_guc_ids: Number of guc_ids; a selftest
+ * feature allows reducing this number while testing.
*/
int num_guc_ids;
/**
- * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
+ * @submission_state.guc_ids_bitmap: used to allocate
+ * new guc_ids, multi-lrc
*/
unsigned long *guc_ids_bitmap;
/**
- * @guc_id_list: list of intel_context with valid guc_ids but no
- * refs
+ * @submission_state.guc_id_list: list of intel_context
+ * with valid guc_ids but no refs
*/
struct list_head guc_id_list;
/**
- * @guc_ids_in_use: Number single-lrc guc_ids in use
+ * @submission_state.guc_ids_in_use: Number of single-lrc
+ * guc_ids in use
*/
unsigned int guc_ids_in_use;
/**
- * @destroyed_contexts: list of contexts waiting to be destroyed
- * (deregistered with the GuC)
+ * @submission_state.destroyed_contexts: list of contexts
+ * waiting to be destroyed (deregistered with the GuC)
*/
struct list_head destroyed_contexts;
/**
- * @destroyed_worker: worker to deregister contexts, need as we
- * need to take a GT PM reference and can't from destroy
- * function as it might be in an atomic context (no sleeping)
+ * @submission_state.destroyed_worker: worker to deregister
+ * contexts; needed because we must take a GT PM reference
+ * and cannot do so from the destroy function, as it might
+ * be called from an atomic context (no sleeping)
*/
struct work_struct destroyed_worker;
/**
- * @reset_fail_worker: worker to trigger a GT reset after an
- * engine reset fails
+ * @submission_state.reset_fail_worker: worker to trigger
+ * a GT reset after an engine reset fails
*/
struct work_struct reset_fail_worker;
/**
- * @reset_fail_mask: mask of engines that failed to reset
+ * @submission_state.reset_fail_mask: mask of engines that
+ * failed to reset
*/
intel_engine_mask_t reset_fail_mask;
/**
- * @sched_disable_delay_ms: schedule disable delay, in ms, for
- * contexts
+ * @submission_state.sched_disable_delay_ms: schedule
+ * disable delay, in ms, for contexts
*/
unsigned int sched_disable_delay_ms;
/**
- * @sched_disable_gucid_threshold: threshold of min remaining available
- * guc_ids before we start bypassing the schedule disable delay
+ * @submission_state.sched_disable_gucid_threshold:
+ * threshold of min remaining available guc_ids before
+ * we start bypassing the schedule disable delay
*/
unsigned int sched_disable_gucid_threshold;
} submission_state;
@@ -243,37 +249,40 @@ struct intel_guc {
*/
struct {
/**
- * @lock: Lock protecting the below fields and the engine stats.
+ * @timestamp.lock: Lock protecting the below fields and
+ * the engine stats.
*/
spinlock_t lock;
/**
- * @gt_stamp: 64 bit extended value of the GT timestamp.
+ * @timestamp.gt_stamp: 64-bit extended value of the GT
+ * timestamp.
*/
u64 gt_stamp;
/**
- * @ping_delay: Period for polling the GT timestamp for
- * overflow.
+ * @timestamp.ping_delay: Period for polling the GT
+ * timestamp for overflow.
*/
unsigned long ping_delay;
/**
- * @work: Periodic work to adjust GT timestamp, engine and
- * context usage for overflows.
+ * @timestamp.work: Periodic work to adjust GT timestamp,
+ * engine and context usage for overflows.
*/
struct delayed_work work;
/**
- * @shift: Right shift value for the gpm timestamp
+ * @timestamp.shift: Right shift value for the gpm timestamp
*/
u32 shift;
/**
- * @last_stat_jiffies: jiffies at last actual stats collection time
- * We use this timestamp to ensure we don't oversample the
- * stats because runtime power management events can trigger
- * stats collection at much higher rates than required.
+ * @timestamp.last_stat_jiffies: jiffies at last actual
+ * stats collection time. We use this timestamp to ensure
+ * we don't oversample the stats because runtime power
+ * management events can trigger stats collection at much
+ * higher rates than required.
*/
unsigned long last_stat_jiffies;
} timestamp;
@@ -297,6 +306,10 @@ struct intel_guc {
* @number_guc_id_stolen: The number of guc_ids that have been stolen
*/
int number_guc_id_stolen;
+ /**
+ * @fast_response_selftest: Backdoor to CT handler for fast response selftest
+ */
+ u32 fast_response_selftest;
#endif
};
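The rewrites in this header follow the kernel-doc rule for nested structures: members of a named sub-struct are documented with a dotted path from the outer member so the tooling can resolve them and stop warning. In miniature (an illustrative struct, not from the driver):

struct foo {
	/** @bar: grouped state for the bar subsystem */
	struct {
		/** @bar.lock: protects everything under @bar */
		spinlock_t lock;
		/** @bar.count: number of live users, protected by @bar.lock */
		unsigned int count;
	} bar;
};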
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index a4da0208c883..a1cd40d80517 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -355,7 +355,7 @@ guc_capture_alloc_steered_lists(struct intel_guc *guc,
static const struct __guc_mmio_reg_descr_group *
guc_capture_get_device_reglist(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
const struct __guc_mmio_reg_descr_group *lists;
if (GRAPHICS_VER(i915) >= 12)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 89e314b3756b..0d5197c0824a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -265,7 +265,7 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
u32 *cmds;
int err;
- err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO);
+ err = i915_inject_probe_error(guc_to_i915(guc), -ENXIO);
if (err)
return err;
@@ -1076,6 +1076,15 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r
found = true;
break;
}
+
+#ifdef CONFIG_DRM_I915_SELFTEST
+ if (!found && ct_to_guc(ct)->fast_response_selftest) {
+ CT_DEBUG(ct, "Assuming unsolicited response due to FAST_REQUEST selftest\n");
+ ct_to_guc(ct)->fast_response_selftest++;
+ found = true;
+ }
+#endif
+
if (!found) {
CT_ERROR(ct, "Unsolicited response message: len %u, data %#x (fence %u, last %u)\n",
len, hxg[0], fence, ct->requests.last_fence);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 55bc8b55fbc0..bf16351c9349 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -520,7 +520,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
static int guc_log_relay_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
int ret;
@@ -573,7 +573,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
intel_wakeref_t wakeref;
_guc_log_copy_debuglogs_for_relay(log);
@@ -589,7 +589,7 @@ static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
static u32 __get_default_log_level(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
/* A negative value means "use platform/config default" */
if (i915->params.guc_log_level < 0) {
@@ -664,7 +664,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
intel_wakeref_t wakeref;
int ret = 0;
@@ -796,7 +796,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
static void guc_log_relay_stop(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
if (!log->relay.started)
return;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
index 1adec6de223c..9df7927304ae 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
@@ -14,7 +14,7 @@ static bool __guc_rc_supported(struct intel_guc *guc)
{
/* GuC RC is unavailable for pre-Gen12 */
return guc->submission_supported &&
- GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
+ GRAPHICS_VER(guc_to_i915(guc)) >= 12;
}
static bool __guc_rc_selected(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 2dfb07cc4b33..3e681ab6fbf9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -34,7 +34,7 @@ static bool __detect_slpc_supported(struct intel_guc *guc)
{
/* GuC SLPC is unavailable for pre-Gen12 */
return guc->submission_supported &&
- GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
+ GRAPHICS_VER(guc_to_i915(guc)) >= 12;
}
static bool __guc_slpc_selected(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index d37698bd6b91..a259f1118c5a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1107,7 +1107,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
if (deregister)
guc_signal_context_fence(ce);
if (destroyed) {
- intel_gt_pm_put_async(guc_to_gt(guc));
+ intel_gt_pm_put_async_untracked(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
@@ -1303,6 +1303,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
unsigned long flags;
u32 reset_count;
bool in_reset;
+ intel_wakeref_t wakeref;
spin_lock_irqsave(&guc->timestamp.lock, flags);
@@ -1325,7 +1326,8 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
* start_gt_clk is derived from GuC state. To get a consistent
* view of activity, we query the GuC state only if gt is awake.
*/
- if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
+ wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt);
+ if (wakeref) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
/*
@@ -1334,7 +1336,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
*/
guc_update_engine_gt_clks(engine);
guc_update_pm_timestamp(guc, now);
- intel_gt_pm_put_async(gt);
+ intel_gt_pm_put_async(gt, wakeref);
if (i915_reset_count(gpu_error) != reset_count) {
*stats = stats_saved;
guc->timestamp.gt_stamp = gt_stamp_saved;
@@ -3385,9 +3387,9 @@ static void destroyed_worker_func(struct work_struct *w)
struct intel_guc *guc = container_of(w, struct intel_guc,
submission_state.destroyed_worker);
struct intel_gt *gt = guc_to_gt(guc);
- int tmp;
+ intel_wakeref_t wakeref;
- with_intel_gt_pm(gt, tmp)
+ with_intel_gt_pm(gt, wakeref)
deregister_destroyed_contexts(guc);
}
@@ -4624,12 +4626,12 @@ static bool __guc_submission_supported(struct intel_guc *guc)
{
/* GuC submission is unavailable for pre-Gen11 */
return intel_guc_is_supported(guc) &&
- GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11;
+ GRAPHICS_VER(guc_to_i915(guc)) >= 11;
}
static bool __guc_submission_selected(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
if (!intel_guc_submission_is_supported(guc))
return false;
@@ -4894,7 +4896,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
intel_context_put(ce);
} else if (context_destroyed(ce)) {
/* Context has been destroyed */
- intel_gt_pm_put_async(guc_to_gt(guc));
+ intel_gt_pm_put_async_untracked(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
@@ -5001,7 +5003,8 @@ static void capture_error_state(struct intel_guc *guc,
if (match) {
intel_engine_set_hung_context(e, ce);
engine_mask |= e->mask;
- atomic_inc(&i915->gpu_error.reset_engine_count[e->uabi_class]);
+ i915_increase_reset_engine_count(&i915->gpu_error,
+ e);
}
}
@@ -5013,7 +5016,7 @@ static void capture_error_state(struct intel_guc *guc,
} else {
intel_engine_set_hung_context(ce->engine, ce);
engine_mask = ce->engine->mask;
- atomic_inc(&i915->gpu_error.reset_engine_count[ce->engine->uabi_class]);
+ i915_increase_reset_engine_count(&i915->gpu_error, ce->engine);
}
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
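intel_gt_pm_get_if_awake() now reports failure as a zero wakeref instead of a bool, letting one variable carry both "did we get it" and "what to hand back". The conditional-get shape used in guc_engine_busyness() above, reduced to its core (sample_hw_state() is hypothetical):

intel_wakeref_t wakeref;

wakeref = intel_gt_pm_get_if_awake(gt);	/* 0 if the GT was parked */
if (wakeref) {
	sample_hw_state(gt);		/* safe: the GT is held awake */
	intel_gt_pm_put_async(gt, wakeref);
}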
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 27f6561dd731..3872d309ed31 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -106,11 +106,6 @@ static void __confirm_options(struct intel_uc *uc)
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "GuC is not supported!");
- if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
- !intel_uc_supports_huc(uc))
- gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "HuC is not supported!");
-
if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 362639162ed6..756093eaf2ad 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -1343,16 +1343,13 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
u32 len = min_t(u32, size, PAGE_SIZE - offset);
- void *vaddr;
if (idx > 0) {
idx--;
continue;
}
- vaddr = kmap_atomic(page);
- memcpy(dst, vaddr + offset, len);
- kunmap_atomic(vaddr);
+ memcpy_from_page(dst, page, offset, len);
offset = 0;
dst += len;
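memcpy_from_page() from linux/highmem.h bundles exactly the map/copy/unmap triple deleted here, mapping with kmap_local_page() internally, so open-coded highmem copies collapse to one call:

memcpy_from_page(dst, page, offset, len);
/* roughly: v = kmap_local_page(page); memcpy(dst, v + offset, len); kunmap_local(v); */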
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index bfb72143566f..c900aac85adb 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -286,11 +286,126 @@ err_wakeref:
return ret;
}
+/*
+ * Send a context schedule H2G message with an invalid context id.
+ * This should generate a GUC_RESULT_INVALID_CONTEXT response.
+ */
+static int bad_h2g(struct intel_guc *guc)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_SCHED_CONTEXT,
+ 0x12345678,
+ };
+
+ return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
+}
+
+/*
+ * Set a spinner running to make sure the system is alive and active,
+ * then send a bad but asynchronous H2G command and wait to see if an
+ * error response is returned. If no response is received or if the
+ * spinner dies then the test will fail.
+ */
+#define FAST_RESPONSE_TIMEOUT_MS 1000
+static int intel_guc_fast_request(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ struct intel_engine_cs *engine = intel_selftest_find_any_engine(gt);
+ bool spinning = false;
+ int ret = 0;
+
+ if (!engine)
+ return 0;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ gt_err(gt, "Failed to create context: %pe\n", ce);
+ goto err_pm;
+ }
+
+ ret = igt_spinner_init(&spin, engine->gt);
+ if (ret) {
+ gt_err(gt, "Failed to create spinner: %pe\n", ERR_PTR(ret));
+ goto err_pm;
+ }
+ spinning = true;
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ gt_err(gt, "Failed to create spinner request: %pe\n", rq);
+ goto err_spin;
+ }
+
+ ret = request_add_spin(rq, &spin);
+ if (ret) {
+ gt_err(gt, "Failed to add spinner request: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+ gt->uc.guc.fast_response_selftest = 1;
+
+ ret = bad_h2g(&gt->uc.guc);
+ if (ret) {
+ gt_err(gt, "Failed to send H2G: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+ ret = wait_for(gt->uc.guc.fast_response_selftest != 1 || i915_request_completed(rq),
+ FAST_RESPONSE_TIMEOUT_MS);
+ if (ret) {
+ gt_err(gt, "Request wait failed: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+ if (i915_request_completed(rq)) {
+ gt_err(gt, "Spinner died waiting for fast request error!\n");
+ ret = -EIO;
+ goto err_rq;
+ }
+
+ if (gt->uc.guc.fast_response_selftest != 2) {
+ gt_err(gt, "Unexpected fast response count: %d\n",
+ gt->uc.guc.fast_response_selftest);
+ ret = -EIO;
+ goto err_rq;
+ }
+
+ igt_spinner_end(&spin);
+ spinning = false;
+
+ ret = intel_selftest_wait_for_rq(rq);
+ if (ret) {
+ gt_err(gt, "Request failed to complete: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+err_rq:
+ i915_request_put(rq);
+
+err_spin:
+ if (spinning)
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+err_pm:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ return ret;
+}
+
int intel_guc_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(intel_guc_scrub_ctbs),
SUBTEST(intel_guc_steal_guc_ids),
+ SUBTEST(intel_guc_fast_request),
};
struct intel_gt *gt = to_gt(i915);
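wait_for() in the new selftest is i915's polling helper from i915_utils.h: it re-evaluates the condition with backoff until it holds or the given number of milliseconds elapses, returning 0 on success and -ETIMEDOUT on timeout, which is why the result can be fed straight to ERR_PTR() for logging. The counter it polls is a three-state handshake: 0 means the backdoor is off, 1 means the selftest armed it, 2 means the CT handler consumed the unsolicited response. Generic shape of the wait (state and ARMED are illustrative):

err = wait_for(READ_ONCE(state) != ARMED, 1000);	/* poll up to 1000 ms */
if (err == -ETIMEDOUT)
	gt_err(gt, "no response within a second\n");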
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
index 34b5d952e2bc..26fdc392fce6 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -74,7 +74,7 @@ static int intel_hang_guc(void *arg)
goto err;
}
- rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
intel_context_put(ce);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index de3f5903d1a7..c8e7dfc9f791 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -422,7 +422,7 @@ static void init_irq_map(struct intel_gvt_irq *irq)
#define MSI_CAP_DATA(offset) (offset + 8)
#define MSI_CAP_EN 0x1
-static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
+static void inject_virtual_interrupt(struct intel_vgpu *vgpu)
{
unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
u16 control, data;
@@ -434,10 +434,10 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
/* Do not generate MSI if MSIEN is disabled */
if (!(control & MSI_CAP_EN))
- return 0;
+ return;
if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
- return -EINVAL;
+ return;
trace_inject_msi(vgpu->id, addr, data);
@@ -451,10 +451,9 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
* returned and don't inject interrupt into guest.
*/
if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
- return -ESRCH;
- if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
- return -EFAULT;
- return 0;
+ return;
+ if (vgpu->msi_trigger)
+ eventfd_signal(vgpu->msi_trigger);
}
static void propagate_event(struct intel_gvt_irq *irq,
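eventfd_signal() dropped its count argument in this kernel cycle and now always adds one to the eventfd counter, so the old `!= 1` failure check has nothing left to test; with no caller able to act on an injection failure anyway, the function becomes void. The call shape after the change:

if (vgpu->msi_trigger)
	eventfd_signal(vgpu->msi_trigger);	/* counter += 1, wakes any poller */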
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index ddf49c2dbb91..2905df83e180 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1211,11 +1211,11 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
for (n = offset >> PAGE_SHIFT; remain; n++) {
int len = min(remain, PAGE_SIZE - x);
- src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+ src = kmap_local_page(i915_gem_object_get_page(src_obj, n));
if (src_needs_clflush)
drm_clflush_virt_range(src + x, len);
memcpy(ptr, src + x, len);
- kunmap_atomic(src);
+ kunmap_local(src);
ptr += len;
remain -= len;
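kmap_local_page() is the modern replacement for kmap_atomic(): the mapping is still short-lived and CPU-local, but it no longer disables preemption or pagefaults, so the copy loop stops running in atomic context. The conversion pattern in isolation:

src = kmap_local_page(page);	/* short-lived, thread-local mapping */
memcpy(ptr, src + x, len);
kunmap_local(src);		/* unmap in reverse order of mapping */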
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index bfe92d2402ea..db99c2ef66db 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -51,6 +51,7 @@
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_driver.h"
+#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "i915_scheduler.h"
@@ -299,107 +300,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
return 0;
}
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
- size_t count, loff_t *pos)
-{
- struct i915_gpu_coredump *error;
- ssize_t ret;
- void *buf;
-
- error = file->private_data;
- if (!error)
- return 0;
-
- /* Bounce buffer required because of kernfs __user API convenience. */
- buf = kmalloc(count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
- if (ret <= 0)
- goto out;
-
- if (!copy_to_user(ubuf, buf, ret))
- *pos += ret;
- else
- ret = -EFAULT;
-
-out:
- kfree(buf);
- return ret;
-}
-
-static int gpu_state_release(struct inode *inode, struct file *file)
-{
- i915_gpu_coredump_put(file->private_data);
- return 0;
-}
-
-static int i915_gpu_info_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *i915 = inode->i_private;
- struct i915_gpu_coredump *gpu;
- intel_wakeref_t wakeref;
-
- gpu = NULL;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE);
-
- if (IS_ERR(gpu))
- return PTR_ERR(gpu);
-
- file->private_data = gpu;
- return 0;
-}
-
-static const struct file_operations i915_gpu_info_fops = {
- .owner = THIS_MODULE,
- .open = i915_gpu_info_open,
- .read = gpu_state_read,
- .llseek = default_llseek,
- .release = gpu_state_release,
-};
-
-static ssize_t
-i915_error_state_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- struct i915_gpu_coredump *error = filp->private_data;
-
- if (!error)
- return 0;
-
- drm_dbg(&error->i915->drm, "Resetting error state\n");
- i915_reset_error_state(error->i915);
-
- return cnt;
-}
-
-static int i915_error_state_open(struct inode *inode, struct file *file)
-{
- struct i915_gpu_coredump *error;
-
- error = i915_first_error_state(inode->i_private);
- if (IS_ERR(error))
- return PTR_ERR(error);
-
- file->private_data = error;
- return 0;
-}
-
-static const struct file_operations i915_error_state_fops = {
- .owner = THIS_MODULE,
- .open = i915_error_state_open,
- .read = gpu_state_read,
- .write = i915_error_state_write,
- .llseek = default_llseek,
- .release = gpu_state_release,
-};
-#endif
-
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -839,10 +739,6 @@ static const struct i915_debugfs_files {
{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
{"i915_wedged", &i915_wedged_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
- {"i915_error_state", &i915_error_state_fops},
- {"i915_gpu_info", &i915_gpu_info_fops},
-#endif
};
void i915_debugfs_register(struct drm_i915_private *dev_priv)
@@ -865,4 +761,6 @@ void i915_debugfs_register(struct drm_i915_private *dev_priv)
drm_debugfs_create_files(i915_debugfs_list,
ARRAY_SIZE(i915_debugfs_list),
minor->debugfs_root, minor);
+
+ i915_gpu_error_debugfs_register(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 2a1faf403965..c7d7c3b7ecc6 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -798,7 +798,9 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_modeset2;
- intel_pxp_init(i915);
+ ret = intel_pxp_init(i915);
+ if (ret && ret != -ENODEV)
+ drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
ret = intel_display_driver_probe(i915);
if (ret)
@@ -1033,7 +1035,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_power_domains_driver_remove(i915);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
- intel_runtime_pm_driver_release(&i915->runtime_pm);
+ intel_runtime_pm_driver_last_release(&i915->runtime_pm);
}
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index 2a44b3876cb5..fa6852713bee 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -28,6 +28,10 @@ struct i915_drm_client *i915_drm_client_alloc(void)
kref_init(&client->kref);
spin_lock_init(&client->ctx_lock);
INIT_LIST_HEAD(&client->ctx_list);
+#ifdef CONFIG_PROC_FS
+ spin_lock_init(&client->objects_lock);
+ INIT_LIST_HEAD(&client->objects_list);
+#endif
return client;
}
@@ -41,6 +45,68 @@ void __i915_drm_client_free(struct kref *kref)
}
#ifdef CONFIG_PROC_FS
+static void
+obj_meminfo(struct drm_i915_gem_object *obj,
+ struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
+{
+ const enum intel_region_id id = obj->mm.region ?
+ obj->mm.region->id : INTEL_REGION_SMEM;
+ const u64 sz = obj->base.size;
+
+ if (obj->base.handle_count > 1)
+ stats[id].shared += sz;
+ else
+ stats[id].private += sz;
+
+ if (i915_gem_object_has_pages(obj)) {
+ stats[id].resident += sz;
+
+ if (!dma_resv_test_signaled(obj->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP))
+ stats[id].active += sz;
+ else if (i915_gem_object_is_shrinkable(obj) &&
+ obj->mm.madv == I915_MADV_DONTNEED)
+ stats[id].purgeable += sz;
+ }
+}
+
+static void show_meminfo(struct drm_printer *p, struct drm_file *file)
+{
+ struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
+ struct drm_i915_file_private *fpriv = file->driver_priv;
+ struct i915_drm_client *client = fpriv->client;
+ struct drm_i915_private *i915 = fpriv->i915;
+ struct drm_i915_gem_object *obj;
+ struct intel_memory_region *mr;
+ struct list_head __rcu *pos;
+ unsigned int id;
+
+ /* Public objects. */
+ spin_lock(&file->table_lock);
+ idr_for_each_entry(&file->object_idr, obj, id)
+ obj_meminfo(obj, stats);
+ spin_unlock(&file->table_lock);
+
+ /* Internal objects. */
+ rcu_read_lock();
+ list_for_each_rcu(pos, &client->objects_list) {
+ obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
+ client_link));
+ if (!obj)
+ continue;
+ obj_meminfo(obj, stats);
+ i915_gem_object_put(obj);
+ }
+ rcu_read_unlock();
+
+ for_each_memory_region(mr, i915, id)
+ drm_print_memory_stats(p,
+ &stats[id],
+ DRM_GEM_OBJECT_RESIDENT |
+ DRM_GEM_OBJECT_PURGEABLE,
+ mr->uabi_name);
+}
+
static const char * const uabi_class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "render",
[I915_ENGINE_CLASS_COPY] = "copy",
@@ -102,10 +168,52 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file)
* ******************************************************************
*/
+ show_meminfo(p, file);
+
if (GRAPHICS_VER(i915) < 8)
return;
for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)
show_client_class(p, i915, file_priv->client, i);
}
+
+void i915_drm_client_add_object(struct i915_drm_client *client,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned long flags;
+
+ GEM_WARN_ON(obj->client);
+ GEM_WARN_ON(!list_empty(&obj->client_link));
+
+ spin_lock_irqsave(&client->objects_lock, flags);
+ obj->client = i915_drm_client_get(client);
+ list_add_tail_rcu(&obj->client_link, &client->objects_list);
+ spin_unlock_irqrestore(&client->objects_lock, flags);
+}
+
+void i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
+{
+ struct i915_drm_client *client = fetch_and_zero(&obj->client);
+ unsigned long flags;
+
+ /* Object may not be associated with a client. */
+ if (!client)
+ return;
+
+ spin_lock_irqsave(&client->objects_lock, flags);
+ list_del_rcu(&obj->client_link);
+ spin_unlock_irqrestore(&client->objects_lock, flags);
+
+ i915_drm_client_put(client);
+}
+
+void i915_drm_client_add_context_objects(struct i915_drm_client *client,
+ struct intel_context *ce)
+{
+ if (ce->state)
+ i915_drm_client_add_object(client, ce->state->obj);
+
+ if (ce->ring != ce->engine->legacy.ring && ce->ring->vma)
+ i915_drm_client_add_object(client, ce->ring->vma->obj);
+}
#endif
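show_meminfo() feeds drm_print_memory_stats(), which emits the standard drm-usage-stats keys once per memory region, so a client's fdinfo should gain lines of roughly this shape (region name and values illustrative):

drm-total-system0:	8192 KiB
drm-shared-system0:	2048 KiB
drm-resident-system0:	8192 KiB
drm-purgeable-system0:	1024 KiB
drm-active-system0:	4096 KiB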
diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
index 67816c912bca..a439dd789936 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.h
+++ b/drivers/gpu/drm/i915/i915_drm_client.h
@@ -12,6 +12,10 @@
#include <uapi/drm/i915_drm.h>
+#include "i915_file_private.h"
+#include "gem/i915_gem_object_types.h"
+#include "gt/intel_context_types.h"
+
#define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_COMPUTE
struct drm_file;
@@ -25,6 +29,20 @@ struct i915_drm_client {
spinlock_t ctx_lock; /* For add/remove from ctx_list. */
struct list_head ctx_list; /* List of contexts belonging to client. */
+#ifdef CONFIG_PROC_FS
+ /**
+ * @objects_lock: lock protecting @objects_list
+ */
+ spinlock_t objects_lock;
+
+ /**
+ * @objects_list: list of objects created by this client
+ *
+ * Protected by @objects_lock.
+ */
+ struct list_head objects_list;
+#endif
+
/**
* @past_runtime: Accumulation of pphwsp runtimes from closed contexts.
*/
@@ -49,4 +67,28 @@ struct i915_drm_client *i915_drm_client_alloc(void);
void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file);
+#ifdef CONFIG_PROC_FS
+void i915_drm_client_add_object(struct i915_drm_client *client,
+ struct drm_i915_gem_object *obj);
+void i915_drm_client_remove_object(struct drm_i915_gem_object *obj);
+void i915_drm_client_add_context_objects(struct i915_drm_client *client,
+ struct intel_context *ce);
+#else
+static inline void i915_drm_client_add_object(struct i915_drm_client *client,
+ struct drm_i915_gem_object *obj)
+{
+}
+
+static inline void
+i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
+{
+}
+
+static inline void
+i915_drm_client_add_context_objects(struct i915_drm_client *client,
+ struct intel_context *ce)
+{
+}
+#endif
+
#endif /* !__I915_DRM_CLIENT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0971f4976324..d04660b60046 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -57,6 +57,7 @@
#include "i915_memcpy.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
+#include "i915_sysfs.h"
#include "i915_utils.h"
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@@ -520,7 +521,7 @@ __find_vma(struct i915_vma_coredump *vma, const char *name)
return NULL;
}
-struct i915_vma_coredump *
+static struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
return __find_vma(ee->vma, "batch");
@@ -609,9 +610,9 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
-void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
- const struct intel_engine_cs *engine,
- const struct i915_vma_coredump *vma)
+static void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
+ const struct intel_engine_cs *engine,
+ const struct i915_vma_coredump *vma)
{
char out[ASCII85_BUFSZ];
struct page *page;
@@ -2140,7 +2141,7 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 du
return error;
}
-struct i915_gpu_coredump *
+static struct i915_gpu_coredump *
i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
static DEFINE_MUTEX(capture_mutex);
@@ -2211,7 +2212,7 @@ void i915_capture_error_state(struct intel_gt *gt,
i915_gpu_coredump_put(error);
}
-struct i915_gpu_coredump *
+static struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
struct i915_gpu_coredump *error;
@@ -2378,3 +2379,184 @@ void intel_klog_error_capture(struct intel_gt *gt,
drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err);
}
#endif
+
+static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *pos)
+{
+ struct i915_gpu_coredump *error;
+ ssize_t ret;
+ void *buf;
+
+ error = file->private_data;
+ if (!error)
+ return 0;
+
+ /* Bounce buffer required because of kernfs __user API convenience. */
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
+ if (ret <= 0)
+ goto out;
+
+ if (!copy_to_user(ubuf, buf, ret))
+ *pos += ret;
+ else
+ ret = -EFAULT;
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+static int gpu_state_release(struct inode *inode, struct file *file)
+{
+ i915_gpu_coredump_put(file->private_data);
+ return 0;
+}
+
+static int i915_gpu_info_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *i915 = inode->i_private;
+ struct i915_gpu_coredump *gpu;
+ intel_wakeref_t wakeref;
+
+ gpu = NULL;
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE);
+
+ if (IS_ERR(gpu))
+ return PTR_ERR(gpu);
+
+ file->private_data = gpu;
+ return 0;
+}
+
+static const struct file_operations i915_gpu_info_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_gpu_info_open,
+ .read = gpu_state_read,
+ .llseek = default_llseek,
+ .release = gpu_state_release,
+};
+
+static ssize_t
+i915_error_state_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct i915_gpu_coredump *error = filp->private_data;
+
+ if (!error)
+ return 0;
+
+ drm_dbg(&error->i915->drm, "Resetting error state\n");
+ i915_reset_error_state(error->i915);
+
+ return cnt;
+}
+
+static int i915_error_state_open(struct inode *inode, struct file *file)
+{
+ struct i915_gpu_coredump *error;
+
+ error = i915_first_error_state(inode->i_private);
+ if (IS_ERR(error))
+ return PTR_ERR(error);
+
+ file->private_data = error;
+ return 0;
+}
+
+static const struct file_operations i915_error_state_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_error_state_open,
+ .read = gpu_state_read,
+ .write = i915_error_state_write,
+ .llseek = default_llseek,
+ .release = gpu_state_release,
+};
+
+void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_error_state", 0644, minor->debugfs_root, i915,
+ &i915_error_state_fops);
+ debugfs_create_file("i915_gpu_info", 0644, minor->debugfs_root, i915,
+ &i915_gpu_info_fops);
+}
+
+static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *kdev = kobj_to_dev(kobj);
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+ struct i915_gpu_coredump *gpu;
+ ssize_t ret = 0;
+
+ /*
+ * FIXME: Concurrent clients triggering resets and reading + clearing
+ * dumps can cause inconsistent sysfs reads when a user calls in with a
+ * non-zero offset to complete a prior partial read but the
+ * gpu_coredump has been cleared or replaced.
+ */
+
+ gpu = i915_first_error_state(i915);
+ if (IS_ERR(gpu)) {
+ ret = PTR_ERR(gpu);
+ } else if (gpu) {
+ ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
+ i915_gpu_coredump_put(gpu);
+ } else {
+ const char *str = "No error state collected\n";
+ size_t len = strlen(str);
+
+ if (off < len) {
+ ret = min_t(size_t, count, len - off);
+ memcpy(buf, str + off, ret);
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t error_state_write(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *kdev = kobj_to_dev(kobj);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+
+ drm_dbg(&dev_priv->drm, "Resetting error state\n");
+ i915_reset_error_state(dev_priv);
+
+ return count;
+}
+
+static const struct bin_attribute error_state_attr = {
+ .attr.name = "error",
+ .attr.mode = S_IRUSR | S_IWUSR,
+ .size = 0,
+ .read = error_state_read,
+ .write = error_state_write,
+};
+
+void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
+{
+ struct device *kdev = i915->drm.primary->kdev;
+
+ if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
+ drm_err(&i915->drm, "error_state sysfs setup failed\n");
+}
+
+void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915)
+{
+ struct device *kdev = i915->drm.primary->kdev;
+
+ sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 4ce227f7e1e1..7c255bb1c319 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -17,6 +17,7 @@
#include "display/intel_display_device.h"
#include "display/intel_display_params.h"
#include "gt/intel_engine.h"
+#include "gt/intel_engine_types.h"
#include "gt/intel_gt_types.h"
#include "gt/uc/intel_uc_fw.h"
@@ -234,7 +235,7 @@ struct i915_gpu_error {
atomic_t reset_count;
/** Number of times an engine has been reset */
- atomic_t reset_engine_count[I915_NUM_ENGINES];
+ atomic_t reset_engine_count[MAX_ENGINE_CLASS + 1];
};
struct drm_i915_error_state_buf {
@@ -257,7 +258,14 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
const struct intel_engine_cs *engine)
{
- return atomic_read(&error->reset_engine_count[engine->uabi_class]);
+ return atomic_read(&error->reset_engine_count[engine->class]);
+}
+
+static inline void
+i915_increase_reset_engine_count(struct i915_gpu_error *error,
+ const struct intel_engine_cs *engine)
+{
+ atomic_inc(&error->reset_engine_count[engine->class]);
}
#define CORE_DUMP_FLAG_NONE 0x0
@@ -277,14 +285,7 @@ static inline void intel_klog_error_capture(struct intel_gt *gt,
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
- const struct intel_engine_cs *engine,
- const struct i915_vma_coredump *vma);
-struct i915_vma_coredump *
-intel_gpu_error_find_batch(const struct intel_engine_coredump *ee);
-
-struct i915_gpu_coredump *i915_gpu_coredump(struct intel_gt *gt,
- intel_engine_mask_t engine_mask, u32 dump_flags);
+
void i915_capture_error_state(struct intel_gt *gt,
intel_engine_mask_t engine_mask, u32 dump_flags);
@@ -332,10 +333,13 @@ static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
kref_put(&gpu->ref, __i915_gpu_coredump_free);
}
-struct i915_gpu_coredump *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
void i915_disable_error_state(struct drm_i915_private *i915, int err);
+void i915_gpu_error_debugfs_register(struct drm_i915_private *i915);
+void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915);
+void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915);
+
#else
__printf(2, 3)
@@ -403,12 +407,6 @@ static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
{
}
-static inline struct i915_gpu_coredump *
-i915_first_error_state(struct drm_i915_private *i915)
-{
- return ERR_PTR(-ENODEV);
-}
-
static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}
@@ -418,6 +416,18 @@ static inline void i915_disable_error_state(struct drm_i915_private *i915,
{
}
+static inline void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
+{
+}
+
+static inline void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
+{
+}
+
+static inline void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915)
+{
+}
+
#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
#endif /* _I915_GPU_ERROR_H_ */
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 975da8e7f2a9..8c3f443c8347 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -175,7 +175,7 @@ hwm_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
* tau4 = (4 | x) << y
* but add 2 when doing the final right shift to account for units
*/
- tau4 = ((1 << x_w) | x) << y;
+ tau4 = (u64)((1 << x_w) | x) << y;
/* val in hwmon interface units (millisec) */
out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
@@ -211,7 +211,7 @@ hwm_power1_max_interval_store(struct device *dev,
r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
- tau4 = ((1 << x_w) | x) << y;
+ tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
if (val > max_win)
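The cast fixes a 32-bit shift: ((1 << x_w) | x) is an int, so shifting it by y loses (or makes undefined) any bits pushed past position 31 before the assignment to the u64 widens the result; promoting the operand first keeps the whole shift in 64 bits. Reduced to its essence:

u64 tau4;
unsigned int x = 3, x_w = 2, y = 30;	/* illustrative field values */

tau4 = ((1 << x_w) | x) << y;		/* int shift: overflows for large y */
tau4 = (u64)((1 << x_w) | x) << y;	/* 64-bit shift: correct */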
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 1b021a4902de..ba82277254b7 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -23,6 +23,8 @@
*/
#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/cpufeature.h>
#include <asm/fpu/api.h>
#include "i915_memcpy.h"
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 7b1c8de2f9cb..2d695818f006 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -772,10 +772,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* The reason field includes flags identifying what
* triggered this specific report (mostly timer
* triggered or e.g. due to a context switch).
- *
- * In MMIO triggered reports, some platforms do not set the
- * reason bit in this field and it is valid to have a reason
- * field of zero.
*/
reason = oa_report_reason(stream, report);
ctx_id = oa_context_id(stream, report32);
@@ -787,8 +783,41 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
*
* Note: that we don't clear the valid_ctx_bit so userspace can
* understand that the ID has been squashed by the kernel.
+ *
+ * Update:
+ *
+ * On XEHP platforms the behavior of context id valid bit has
+ * changed compared to prior platforms. To describe this, we
+ * define a few terms:
+ *
+ * context-switch-report: This is a report with the reason type
+ * being context-switch. It is generated when a context switches
+ * out.
+ *
+ * context-valid-bit: A bit that is set in the report ID field
+ * to indicate that a valid context has been loaded.
+ *
+ * gpu-idle: A condition characterized by a
+ * context-switch-report with context-valid-bit set to 0.
+ *
+ * On prior platforms, the context-valid-bit is set to 0 only
+ * when the GPU goes idle. In all other reports, it is set to 1.
+ *
+ * On XEHP platforms, context-valid-bit is set to 1 in a context
+ * switch report if a new context switched in. For all other
+ * reports it is set to 0.
+ *
+ * This change in behavior causes an issue with MMIO triggered
+ * reports. MMIO triggered reports have the markers in the
+ * context ID field and the context-valid-bit is 0. The logic
+ * below to squash the context ID would render the report
+ * useless since the user will not be able to find it in the OA
+ * buffer. Since MMIO triggered reports exist only on XEHP,
+ * we should avoid squashing these for XEHP platforms.
*/
- if (oa_report_ctx_invalid(stream, report)) {
+
+ if (oa_report_ctx_invalid(stream, report) &&
+ GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) {
ctx_id = INVALID_CTX_ID;
oa_context_id_squash(stream, report32);
}
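GRAPHICS_VER_FULL() compares major version and release as one packed value, so `< IP_VER(12, 50)` reads as "anything older than Xe_HP (12.50)": the squash stays on all pre-XEHP platforms and is skipped exactly where MMIO-triggered reports exist. The packed encoding, as defined by the driver's IP_VER() macro:

#define IP_VER(ver, rel)	((ver) << 8 | (rel))
/* IP_VER(12, 50) == 0x0c32; any 12.xx with xx < 50 compares lower */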
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index 13b1ae9b96c7..46445248d193 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -291,7 +291,8 @@ struct i915_perf_stream {
int size_exponent;
/**
- * @ptr_lock: Locks reads and writes to all head/tail state
+ * @oa_buffer.ptr_lock: Locks reads and writes to all
+ * head/tail state
*
* Consider: the head and tail pointer state needs to be read
* consistently from a hrtimer callback (atomic context) and
@@ -313,7 +314,8 @@ struct i915_perf_stream {
spinlock_t ptr_lock;
/**
- * @head: Although we can always read back the head pointer register,
+ * @oa_buffer.head: Although we can always read back
+ * the head pointer register,
* we prefer to avoid trusting the HW state, just to avoid any
* risk that some hardware condition could somehow bump the
* head pointer unpredictably and cause us to forward the wrong
@@ -322,7 +324,8 @@ struct i915_perf_stream {
u32 head;
/**
- * @tail: The last verified tail that can be read by userspace.
+ * @oa_buffer.tail: The last verified tail that can be
+ * read by userspace.
*/
u32 tail;
} oa_buffer;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index f861863eb7c1..21eb0c5b320d 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -31,6 +31,16 @@
static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;
+static struct i915_pmu *event_to_pmu(struct perf_event *event)
+{
+ return container_of(event->pmu, struct i915_pmu, base);
+}
+
+static struct drm_i915_private *pmu_to_i915(struct i915_pmu *pmu)
+{
+ return container_of(pmu, struct drm_i915_private, pmu);
+}
+
static u8 engine_config_sample(u64 config)
{
return config & I915_PMU_SAMPLE_MASK;
@@ -141,7 +151,7 @@ static u32 frequency_enabled_mask(void)
static bool pmu_needs_timer(struct i915_pmu *pmu)
{
- struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
u32 enable;
/*
@@ -213,19 +223,19 @@ static u64 get_rc6(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
const unsigned int gt_id = gt->info.id;
struct i915_pmu *pmu = &i915->pmu;
+ intel_wakeref_t wakeref;
unsigned long flags;
- bool awake = false;
u64 val;
- if (intel_gt_pm_get_if_awake(gt)) {
+ wakeref = intel_gt_pm_get_if_awake(gt);
+ if (wakeref) {
val = __get_rc6(gt);
- intel_gt_pm_put_async(gt);
- awake = true;
+ intel_gt_pm_put_async(gt, wakeref);
}
spin_lock_irqsave(&pmu->lock, flags);
- if (awake) {
+ if (wakeref) {
store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);
} else {
/*
@@ -251,7 +261,7 @@ static u64 get_rc6(struct intel_gt *gt)
static void init_rc6(struct i915_pmu *pmu)
{
- struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
struct intel_gt *gt;
unsigned int i;
@@ -429,12 +439,14 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
const unsigned int gt_id = gt->info.id;
struct i915_pmu *pmu = &i915->pmu;
struct intel_rps *rps = &gt->rps;
+ intel_wakeref_t wakeref;
if (!frequency_sampling_enabled(pmu, gt_id))
return;
/* Report 0/0 (actual/requested) frequency while parked. */
- if (!intel_gt_pm_get_if_awake(gt))
+ wakeref = intel_gt_pm_get_if_awake(gt);
+ if (!wakeref)
return;
if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
@@ -463,14 +475,13 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
period_ns / 1000);
}
- intel_gt_pm_put_async(gt);
+ intel_gt_pm_put_async(gt, wakeref);
}
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
- struct drm_i915_private *i915 =
- container_of(hrtimer, struct drm_i915_private, pmu.timer);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = container_of(hrtimer, struct i915_pmu, timer);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
unsigned int period_ns;
struct intel_gt *gt;
unsigned int i;
@@ -505,8 +516,8 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
static void i915_pmu_event_destroy(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
drm_WARN_ON(&i915->drm, event->parent);
@@ -572,8 +583,8 @@ config_status(struct drm_i915_private *i915, u64 config)
static int engine_event_init(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
struct intel_engine_cs *engine;
engine = intel_engine_lookup_user(i915, engine_event_class(event),
@@ -586,9 +597,8 @@ static int engine_event_init(struct perf_event *event)
static int i915_pmu_event_init(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
int ret;
if (pmu->closed)
@@ -628,9 +638,8 @@ static int i915_pmu_event_init(struct perf_event *event)
static u64 __i915_pmu_event_read(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
u64 val = 0;
if (is_engine_event(event)) {
@@ -686,10 +695,8 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
static void i915_pmu_event_read(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
struct hw_perf_event *hwc = &event->hw;
- struct i915_pmu *pmu = &i915->pmu;
u64 prev, new;
if (pmu->closed) {
@@ -707,10 +714,9 @@ static void i915_pmu_event_read(struct perf_event *event)
static void i915_pmu_enable(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
const unsigned int bit = event_bit(event);
- struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
if (bit == -1)
@@ -771,10 +777,9 @@ update:
static void i915_pmu_disable(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
const unsigned int bit = event_bit(event);
- struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
if (bit == -1)
@@ -818,9 +823,7 @@ static void i915_pmu_disable(struct perf_event *event)
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
if (pmu->closed)
return;
@@ -848,9 +851,7 @@ out:
static int i915_pmu_event_add(struct perf_event *event, int flags)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
if (pmu->closed)
return -ENODEV;
@@ -982,7 +983,7 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
- struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
static const struct {
unsigned int counter;
const char *name;
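
The two helpers introduced above collapse a dozen open-coded container_of() chains into named conversions. The pattern generalizes to any pair of embedded structures; a standalone sketch (structure names are illustrative):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pmu { int dummy; };

struct my_pmu {
	struct pmu base;	/* embedded, like i915_pmu.base */
};

struct my_device {
	struct my_pmu pmu;	/* embedded, like drm_i915_private.pmu */
};

/* One container_of() hop per embedding level, mirroring
 * event_to_pmu() and pmu_to_i915() above.
 */
static struct my_pmu *base_to_pmu(struct pmu *base)
{
	return container_of(base, struct my_pmu, base);
}

static struct my_device *pmu_to_device(struct my_pmu *pmu)
{
	return container_of(pmu, struct my_device, pmu);
}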
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e88bb4f04305..613decd47760 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -155,81 +155,6 @@ static const struct bin_attribute dpf_attrs_1 = {
.private = (void *)1
};
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-
-static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
-
- struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct i915_gpu_coredump *gpu;
- ssize_t ret = 0;
-
- /*
- * FIXME: Concurrent clients triggering resets and reading + clearing
- * dumps can cause inconsistent sysfs reads when a user calls in with a
- * non-zero offset to complete a prior partial read but the
- * gpu_coredump has been cleared or replaced.
- */
-
- gpu = i915_first_error_state(i915);
- if (IS_ERR(gpu)) {
- ret = PTR_ERR(gpu);
- } else if (gpu) {
- ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
- i915_gpu_coredump_put(gpu);
- } else {
- const char *str = "No error state collected\n";
- size_t len = strlen(str);
-
- if (off < len) {
- ret = min_t(size_t, count, len - off);
- memcpy(buf, str + off, ret);
- }
- }
-
- return ret;
-}
-
-static ssize_t error_state_write(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-
- drm_dbg(&dev_priv->drm, "Resetting error state\n");
- i915_reset_error_state(dev_priv);
-
- return count;
-}
-
-static const struct bin_attribute error_state_attr = {
- .attr.name = "error",
- .attr.mode = S_IRUSR | S_IWUSR,
- .size = 0,
- .read = error_state_read,
- .write = error_state_write,
-};
-
-static void i915_setup_error_capture(struct device *kdev)
-{
- if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
- drm_err(&kdev_minor_to_i915(kdev)->drm,
- "error_state sysfs setup failed\n");
-}
-
-static void i915_teardown_error_capture(struct device *kdev)
-{
- sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
-}
-#else
-static void i915_setup_error_capture(struct device *kdev) {}
-static void i915_teardown_error_capture(struct device *kdev) {}
-#endif
-
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
@@ -255,7 +180,7 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
drm_warn(&dev_priv->drm,
"failed to register GT sysfs directory\n");
- i915_setup_error_capture(kdev);
+ i915_gpu_error_sysfs_setup(dev_priv);
intel_engines_add_sysfs(dev_priv);
}
@@ -264,7 +189,7 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
- i915_teardown_error_capture(kdev);
+ i915_gpu_error_sysfs_teardown(dev_priv);
device_remove_bin_file(kdev, &dpf_attrs_1);
device_remove_bin_file(kdev, &dpf_attrs);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 3d1fdea9811d..60a03340bbd4 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -216,6 +216,22 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
return err;
}
+static const char *region_type_str(u16 type)
+{
+ switch (type) {
+ case INTEL_MEMORY_SYSTEM:
+ return "system";
+ case INTEL_MEMORY_LOCAL:
+ return "local";
+ case INTEL_MEMORY_STOLEN_LOCAL:
+ return "stolen-local";
+ case INTEL_MEMORY_STOLEN_SYSTEM:
+ return "stolen-system";
+ default:
+ return "unknown";
+ }
+}
+
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t start,
@@ -244,6 +260,9 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->type = type;
mem->instance = instance;
+ snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
+ region_type_str(type), instance);
+
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 2953ed5c3248..9ba36454e51b 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -80,6 +80,7 @@ struct intel_memory_region {
u16 instance;
enum intel_region_id id;
char name[16];
+ char uabi_name[16];
bool private; /* not for userspace */
struct {
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 156cb1536b47..860b51b56a92 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -57,182 +57,37 @@ static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-#include <linux/sort.h>
-
-#define STACKDEPTH 8
-
-static noinline depot_stack_handle_t __save_depot_stack(void)
-{
- unsigned long entries[STACKDEPTH];
- unsigned int n;
-
- n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
- return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
-}
-
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- spin_lock_init(&rpm->debug.lock);
- stack_depot_init();
+ ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT, dev_name(rpm->kdev));
}
-static noinline depot_stack_handle_t
+static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- depot_stack_handle_t stack, *stacks;
- unsigned long flags;
-
- if (rpm->no_wakeref_tracking)
+ if (!rpm->available || rpm->no_wakeref_tracking)
return -1;
- stack = __save_depot_stack();
- if (!stack)
- return -1;
-
- spin_lock_irqsave(&rpm->debug.lock, flags);
-
- if (!rpm->debug.count)
- rpm->debug.last_acquire = stack;
-
- stacks = krealloc(rpm->debug.owners,
- (rpm->debug.count + 1) * sizeof(*stacks),
- GFP_NOWAIT | __GFP_NOWARN);
- if (stacks) {
- stacks[rpm->debug.count++] = stack;
- rpm->debug.owners = stacks;
- } else {
- stack = -1;
- }
-
- spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- return stack;
+ return intel_ref_tracker_alloc(&rpm->debug);
}
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
- depot_stack_handle_t stack)
-{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
- unsigned long flags, n;
- bool found = false;
-
- if (unlikely(stack == -1))
- return;
-
- spin_lock_irqsave(&rpm->debug.lock, flags);
- for (n = rpm->debug.count; n--; ) {
- if (rpm->debug.owners[n] == stack) {
- memmove(rpm->debug.owners + n,
- rpm->debug.owners + n + 1,
- (--rpm->debug.count - n) * sizeof(stack));
- found = true;
- break;
- }
- }
- spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- if (drm_WARN(&i915->drm, !found,
- "Unmatched wakeref (tracking %lu), count %u\n",
- rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
- char *buf;
-
- buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
- if (!buf)
- return;
-
- stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
- DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
-
- stack = READ_ONCE(rpm->debug.last_release);
- if (stack) {
- stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
- DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
- }
-
- kfree(buf);
- }
-}
-
-static int cmphandle(const void *_a, const void *_b)
-{
- const depot_stack_handle_t * const a = _a, * const b = _b;
-
- if (*a < *b)
- return -1;
- else if (*a > *b)
- return 1;
- else
- return 0;
-}
-
-static void
-__print_intel_runtime_pm_wakeref(struct drm_printer *p,
- const struct intel_runtime_pm_debug *dbg)
+ intel_wakeref_t wakeref)
{
- unsigned long i;
- char *buf;
-
- buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
- if (!buf)
+ if (!rpm->available || rpm->no_wakeref_tracking)
return;
- if (dbg->last_acquire) {
- stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2);
- drm_printf(p, "Wakeref last acquired:\n%s", buf);
- }
-
- if (dbg->last_release) {
- stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2);
- drm_printf(p, "Wakeref last released:\n%s", buf);
- }
-
- drm_printf(p, "Wakeref count: %lu\n", dbg->count);
-
- sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
-
- for (i = 0; i < dbg->count; i++) {
- depot_stack_handle_t stack = dbg->owners[i];
- unsigned long rep;
-
- rep = 1;
- while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
- rep++, i++;
- stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
- drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
- }
-
- kfree(buf);
-}
-
-static noinline void
-__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
- struct intel_runtime_pm_debug *saved)
-{
- *saved = *debug;
-
- debug->owners = NULL;
- debug->count = 0;
- debug->last_release = __save_depot_stack();
+ intel_ref_tracker_free(&rpm->debug, wakeref);
}
-static void
-dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
+static void untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
- if (debug->count) {
- struct drm_printer p = drm_debug_printer("i915");
-
- __print_intel_runtime_pm_wakeref(&p, debug);
- }
-
- kfree(debug->owners);
+ ref_tracker_dir_exit(&rpm->debug);
}
static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
- struct intel_runtime_pm_debug dbg = {};
unsigned long flags;
if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
@@ -240,60 +95,14 @@ __intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
flags))
return;
- __untrack_all_wakerefs(&rpm->debug, &dbg);
+ ref_tracker_dir_print_locked(&rpm->debug, INTEL_REFTRACK_PRINT_LIMIT);
spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- dump_and_free_wakeref_tracking(&dbg);
-}
-
-static noinline void
-untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
-{
- struct intel_runtime_pm_debug dbg = {};
- unsigned long flags;
-
- spin_lock_irqsave(&rpm->debug.lock, flags);
- __untrack_all_wakerefs(&rpm->debug, &dbg);
- spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- dump_and_free_wakeref_tracking(&dbg);
}
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
struct drm_printer *p)
{
- struct intel_runtime_pm_debug dbg = {};
-
- do {
- unsigned long alloc = dbg.count;
- depot_stack_handle_t *s;
-
- spin_lock_irq(&rpm->debug.lock);
- dbg.count = rpm->debug.count;
- if (dbg.count <= alloc) {
- memcpy(dbg.owners,
- rpm->debug.owners,
- dbg.count * sizeof(*s));
- }
- dbg.last_acquire = rpm->debug.last_acquire;
- dbg.last_release = rpm->debug.last_release;
- spin_unlock_irq(&rpm->debug.lock);
- if (dbg.count <= alloc)
- break;
-
- s = krealloc(dbg.owners,
- dbg.count * sizeof(*s),
- GFP_NOWAIT | __GFP_NOWARN);
- if (!s)
- goto out;
-
- dbg.owners = s;
- } while (1);
-
- __print_intel_runtime_pm_wakeref(p, &dbg);
-
-out:
- kfree(dbg.owners);
+ intel_ref_tracker_show(&rpm->debug, p);
}
#else
@@ -302,14 +111,14 @@ static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}
-static depot_stack_handle_t
+static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
return -1;
}
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
- intel_wakeref_t wref)
+ intel_wakeref_t wakeref)
{
}
@@ -636,7 +445,11 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
"i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
intel_rpm_raw_wakeref_count(count),
intel_rpm_wakelock_count(count));
+}
+void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm)
+{
+ intel_runtime_pm_driver_release(rpm);
untrack_all_intel_runtime_pm_wakerefs(rpm);
}
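
The conversion above swaps roughly 150 lines of hand-rolled stack-depot bookkeeping for the generic ref_tracker API, whose lifecycle is: initialize a directory, allocate one tracker per acquired reference, free it on release, and print/exit on teardown. A minimal sketch of that lifecycle as used here (assumes CONFIG_REF_TRACKER is enabled; the directory name is illustrative):

#include <linux/gfp.h>
#include <linux/ref_tracker.h>

static struct ref_tracker_dir dir;

static void ref_tracker_example(void)
{
	struct ref_tracker *tracker = NULL;

	/* Quarantine up to 16 freed trackers to catch double-frees. */
	ref_tracker_dir_init(&dir, 16, "example");

	ref_tracker_alloc(&dir, &tracker, GFP_KERNEL);	/* on acquire */
	ref_tracker_free(&dir, &tracker);		/* on release */

	/* Report any still-allocated (leaked) references, then tear down. */
	ref_tracker_dir_print(&dir, 16);
	ref_tracker_dir_exit(&dir);
}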
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index be43614c73fd..de3579d399e1 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -75,15 +75,7 @@ struct intel_runtime_pm {
* paired rpm_put) we can remove the corresponding entries and keep
* the array trimmed to active wakerefs.
*/
- struct intel_runtime_pm_debug {
- spinlock_t lock;
-
- depot_stack_handle_t last_acquire;
- depot_stack_handle_t last_release;
-
- depot_stack_handle_t *owners;
- unsigned long count;
- } debug;
+ struct ref_tracker_dir debug;
#endif
};
@@ -187,6 +179,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
+void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 623a69089386..dea2f63184f8 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -99,7 +99,8 @@ static void __intel_wakeref_put_work(struct work_struct *wrk)
void __intel_wakeref_init(struct intel_wakeref *wf,
struct drm_i915_private *i915,
const struct intel_wakeref_ops *ops,
- struct intel_wakeref_lockclass *key)
+ struct intel_wakeref_lockclass *key,
+ const char *name)
{
wf->i915 = i915;
wf->ops = ops;
@@ -111,6 +112,10 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
lockdep_init_map(&wf->work.work.lockdep_map,
"wakeref.work", &key->work, 0);
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
+ ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, name);
+#endif
}
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
@@ -191,3 +196,31 @@ void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
intel_wakeref_auto(wf, 0);
INTEL_WAKEREF_BUG_ON(wf->wakeref);
}
+
+void intel_ref_tracker_show(struct ref_tracker_dir *dir,
+ struct drm_printer *p)
+{
+ const size_t buf_size = PAGE_SIZE;
+ char *buf, *sb, *se;
+ size_t count;
+
+ buf = kmalloc(buf_size, GFP_NOWAIT);
+ if (!buf)
+ return;
+
+ count = ref_tracker_dir_snprint(dir, buf, buf_size);
+ if (!count)
+ goto free;
+ /* printk does not like big buffers, so we split the output by lines */
+ for (sb = buf; *sb; sb = se + 1) {
+ se = strchrnul(sb, '\n');
+ drm_printf(p, "%.*s", (int)(se - sb + 1), sb);
+ if (!*se)
+ break;
+ }
+ if (count >= buf_size)
+ drm_printf(p, "\n...dropped %zd extra bytes of leak report.\n",
+ count + 1 - buf_size);
+free:
+ kfree(buf);
+}
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index ec881b097368..68aa3be48251 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -7,16 +7,25 @@
#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H
+#include <drm/drm_print.h>
+
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/ref_tracker.h>
+#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+typedef unsigned long intel_wakeref_t;
+
+#define INTEL_REFTRACK_DEAD_COUNT 16
+#define INTEL_REFTRACK_PRINT_LIMIT 16
+
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
@@ -26,8 +35,6 @@
struct intel_runtime_pm;
struct intel_wakeref;
-typedef depot_stack_handle_t intel_wakeref_t;
-
struct intel_wakeref_ops {
int (*get)(struct intel_wakeref *wf);
int (*put)(struct intel_wakeref *wf);
@@ -43,6 +50,10 @@ struct intel_wakeref {
const struct intel_wakeref_ops *ops;
struct delayed_work work;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
+ struct ref_tracker_dir debug;
+#endif
};
struct intel_wakeref_lockclass {
@@ -53,11 +64,12 @@ struct intel_wakeref_lockclass {
void __intel_wakeref_init(struct intel_wakeref *wf,
struct drm_i915_private *i915,
const struct intel_wakeref_ops *ops,
- struct intel_wakeref_lockclass *key);
-#define intel_wakeref_init(wf, i915, ops) do { \
+ struct intel_wakeref_lockclass *key,
+ const char *name);
+#define intel_wakeref_init(wf, i915, ops, name) do { \
static struct intel_wakeref_lockclass __key; \
\
- __intel_wakeref_init((wf), (i915), (ops), &__key); \
+ __intel_wakeref_init((wf), (i915), (ops), &__key, name); \
} while (0)
int __intel_wakeref_get_first(struct intel_wakeref *wf);
@@ -261,6 +273,57 @@ __intel_wakeref_defer_park(struct intel_wakeref *wf)
*/
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
+#define INTEL_WAKEREF_DEF ((intel_wakeref_t)(-1))
+
+static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *dir)
+{
+ struct ref_tracker *user = NULL;
+
+ ref_tracker_alloc(dir, &user, GFP_NOWAIT);
+
+ return (intel_wakeref_t)user ?: INTEL_WAKEREF_DEF;
+}
+
+static inline void intel_ref_tracker_free(struct ref_tracker_dir *dir,
+ intel_wakeref_t handle)
+{
+ struct ref_tracker *user;
+
+ user = (handle == INTEL_WAKEREF_DEF) ? NULL : (void *)handle;
+
+ ref_tracker_free(dir, &user);
+}
+
+void intel_ref_tracker_show(struct ref_tracker_dir *dir,
+ struct drm_printer *p);
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
+
+static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
+{
+ return intel_ref_tracker_alloc(&wf->debug);
+}
+
+static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
+ intel_wakeref_t handle)
+{
+ intel_ref_tracker_free(&wf->debug, handle);
+}
+
+#else
+
+static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
+{
+ return -1;
+}
+
+static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
+ intel_wakeref_t handle)
+{
+}
+
+#endif
+
struct intel_wakeref_auto {
struct drm_i915_private *i915;
struct timer_list timer;
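
With intel_wakeref_t now an unsigned long, a handle is either a struct ref_tracker pointer in disguise or the all-ones sentinel, exactly as intel_ref_tracker_alloc() and intel_ref_tracker_free() encode it above. The round trip in isolation (names are illustrative):

struct tracker;

typedef unsigned long wakeref_t;
#define WAKEREF_DEF ((wakeref_t)-1)

static wakeref_t encode(struct tracker *t)
{
	/* NULL (allocation failed or tracking disabled) maps to the
	 * sentinel, so callers never hold a 0 handle.
	 */
	return t ? (wakeref_t)t : WAKEREF_DEF;
}

static struct tracker *decode(wakeref_t handle)
{
	return handle == WAKEREF_DEF ? NULL : (struct tracker *)handle;
}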
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index dc327cf40b5a..75278e78ca90 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -199,6 +199,9 @@ int intel_pxp_init(struct drm_i915_private *i915)
struct intel_gt *gt;
bool is_full_feature = false;
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return -ENOTCONN;
+
/*
* NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since
* we still need it if PXP's backend tee transport is needed.
@@ -303,6 +306,8 @@ static int __pxp_global_teardown_final(struct intel_pxp *pxp)
if (!pxp->arb_is_valid)
return 0;
+
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for suspend/fini");
/*
* To ensure synchronous and coherent session teardown completion
* in response to suspend or shutdown triggers, don't use a worker.
@@ -324,6 +329,8 @@ static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
if (pxp->arb_is_valid)
return 0;
+
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for restart");
/*
* The arb-session is currently inactive and we are doing a reset and restart
* due to a runtime event. Use the worker that was designed for this.
@@ -332,8 +339,11 @@ static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
timeout = intel_pxp_get_backend_timeout_ms(pxp);
- if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
+ if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) {
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: restart backend timed out (%d ms)",
+ timeout);
return -ETIMEDOUT;
+ }
return 0;
}
@@ -414,10 +424,12 @@ int intel_pxp_start(struct intel_pxp *pxp)
int ret = 0;
ret = intel_pxp_get_readiness_status(pxp, PXP_READINESS_TIMEOUT);
- if (ret < 0)
+ if (ret < 0) {
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: tried but not-avail (%d)", ret);
return ret;
- else if (ret > 1)
+ } else if (ret > 1) {
return -EIO; /* per UAPI spec, user may retry later */
+ }
mutex_lock(&pxp->arb_mutex);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
index 91e9622c07d0..d81750b9bdda 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -40,11 +40,12 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT)) {
/* immediately mark PXP as inactive on termination */
intel_pxp_mark_termination_in_progress(pxp);
- pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED;
+ pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED |
+ PXP_EVENT_TYPE_IRQ;
}
if (iir & GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
- pxp->session_events |= PXP_TERMINATION_COMPLETE;
+ pxp->session_events |= PXP_TERMINATION_COMPLETE | PXP_EVENT_TYPE_IRQ;
if (pxp->session_events)
queue_work(system_unbound_wq, &pxp->session_work);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index 0a3e66b0265e..091c86e03d1a 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -137,8 +137,10 @@ void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_res
static void pxp_terminate_complete(struct intel_pxp *pxp)
{
/* Re-create the arb session after teardown handle complete */
- if (fetch_and_zero(&pxp->hw_state_invalidated))
+ if (fetch_and_zero(&pxp->hw_state_invalidated)) {
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: creating arb_session after invalidation");
pxp_create_arb_session(pxp);
+ }
complete_all(&pxp->termination);
}
@@ -157,6 +159,8 @@ static void pxp_session_work(struct work_struct *work)
if (!events)
return;
+ drm_dbg(&gt->i915->drm, "PXP: processing event-flags 0x%08x", events);
+
if (events & PXP_INVAL_REQUIRED)
intel_pxp_invalidate(pxp);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
index 7e11fa8034b2..07864b584cf4 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -124,6 +124,7 @@ struct intel_pxp {
#define PXP_TERMINATION_REQUEST BIT(0)
#define PXP_TERMINATION_COMPLETE BIT(1)
#define PXP_INVAL_REQUIRED BIT(2)
+#define PXP_EVENT_TYPE_IRQ BIT(3)
};
#endif /* __INTEL_PXP_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
index 4ddc6d902752..7d41874a49c5 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -37,8 +37,9 @@ int igt_live_test_begin(struct igt_live_test *t,
}
for_each_engine(engine, gt, id)
- t->reset_engine[id] =
- i915_reset_engine_count(&i915->gpu_error, engine);
+ t->reset_engine[i][id] =
+ i915_reset_engine_count(&i915->gpu_error,
+ engine);
}
t->reset_global = i915_reset_count(&i915->gpu_error);
@@ -66,14 +67,14 @@ int igt_live_test_end(struct igt_live_test *t)
for_each_gt(gt, i915, i) {
for_each_engine(engine, gt, id) {
- if (t->reset_engine[id] ==
+ if (t->reset_engine[i][id] ==
i915_reset_engine_count(&i915->gpu_error, engine))
continue;
gt_err(gt, "%s(%s): engine '%s' was reset %d times!\n",
t->func, t->name, engine->name,
i915_reset_engine_count(&i915->gpu_error, engine) -
- t->reset_engine[id]);
+ t->reset_engine[i][id]);
return -EIO;
}
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h
index 36ed42736c52..83e3ad430922 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.h
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h
@@ -7,6 +7,7 @@
#ifndef IGT_LIVE_TEST_H
#define IGT_LIVE_TEST_H
+#include "gt/intel_gt_defines.h" /* for I915_MAX_GT */
#include "gt/intel_engine.h" /* for I915_NUM_ENGINES */
struct drm_i915_private;
@@ -17,7 +18,7 @@ struct igt_live_test {
const char *name;
unsigned int reset_global;
- unsigned int reset_engine[I915_NUM_ENGINES];
+ unsigned int reset_engine[I915_MAX_GT][I915_NUM_ENGINES];
};
/*
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index 2ca7e535799f..ecdd5767d8ef 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -193,13 +193,14 @@ struct pvr_device {
* @queues: Queue-related fields.
*/
struct {
- /** @active: Active queue list. */
+ /** @queues.active: Active queue list. */
struct list_head active;
- /** @idle: Idle queue list. */
+ /** @queues.idle: Idle queue list. */
struct list_head idle;
- /** @lock: Lock protecting access to the active/idle lists. */
+ /**
+ * @queues.lock: Lock protecting access to the active/idle
+ * lists.
+ */
struct mutex lock;
} queues;
@@ -207,18 +208,18 @@ struct pvr_device {
* @watchdog: Watchdog for communications with firmware.
*/
struct {
- /** @work: Work item for watchdog callback. */
+ /** @watchdog.work: Work item for watchdog callback. */
struct delayed_work work;
/**
- * @old_kccb_cmds_executed: KCCB command execution count at last
- * watchdog poll.
+ * @watchdog.old_kccb_cmds_executed: KCCB command execution
+ * count at last watchdog poll.
*/
u32 old_kccb_cmds_executed;
/**
- * @kccb_stall_count: Number of watchdog polls KCCB has been
- * stalled for.
+ * @watchdog.kccb_stall_count: Number of watchdog polls
+ * KCCB has been stalled for.
*/
u32 kccb_stall_count;
} watchdog;
@@ -227,43 +228,46 @@ struct pvr_device {
* @kccb: Circular buffer for communications with firmware.
*/
struct {
- /** @ccb: Kernel CCB. */
+ /** @kccb.ccb: Kernel CCB. */
struct pvr_ccb ccb;
- /** @rtn_q: Waitqueue for KCCB command return waiters. */
+ /** @kccb.rtn_q: Waitqueue for KCCB command return waiters. */
wait_queue_head_t rtn_q;
- /** @rtn_obj: Object representing KCCB return slots. */
+ /** @kccb.rtn_obj: Object representing KCCB return slots. */
struct pvr_fw_object *rtn_obj;
/**
- * @rtn: Pointer to CPU mapping of KCCB return slots. Must be
- * accessed by READ_ONCE()/WRITE_ONCE().
+ * @kccb.rtn: Pointer to CPU mapping of KCCB return slots.
+ * Must be accessed by READ_ONCE()/WRITE_ONCE().
*/
u32 *rtn;
- /** @slot_count: Total number of KCCB slots available. */
+ /** @kccb.slot_count: Total number of KCCB slots available. */
u32 slot_count;
- /** @reserved_count: Number of KCCB slots reserved for future use. */
+ /**
+ * @kccb.reserved_count: Number of KCCB slots reserved for
+ * future use.
+ */
u32 reserved_count;
/**
- * @waiters: List of KCCB slot waiters.
+ * @kccb.waiters: List of KCCB slot waiters.
*/
struct list_head waiters;
- /** @fence_ctx: KCCB fence context. */
+ /** @kccb.fence_ctx: KCCB fence context. */
struct {
- /** @id: KCCB fence context ID allocated with dma_fence_context_alloc(). */
+ /**
+ * @kccb.fence_ctx.id: KCCB fence context ID
+ * allocated with dma_fence_context_alloc().
+ */
u64 id;
- /** @seqno: Sequence number incremented each time a fence is created. */
+ /**
+ * @kccb.fence_ctx.seqno: Sequence number incremented
+ * each time a fence is created.
+ */
atomic_t seqno;
/**
- * @lock: Lock used to synchronize access to fences allocated by this
- * context.
+ * @kccb.fence_ctx.lock: Lock used to synchronize
+ * access to fences allocated by this context.
*/
spinlock_t lock;
} fence_ctx;
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 7159fc479001..31199e45b72e 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -248,7 +248,7 @@ static bool fw_trace_get_next(struct pvr_fw_trace_seq_data *trace_seq_data)
continue;
return true;
- };
+ }
/* Hit end of trace data. */
return false;
diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.c b/drivers/gpu/drm/imagination/pvr_hwrt.c
index c4213c18489e..54f88d6c01e5 100644
--- a/drivers/gpu/drm/imagination/pvr_hwrt.c
+++ b/drivers/gpu/drm/imagination/pvr_hwrt.c
@@ -458,7 +458,7 @@ pvr_hwrt_dataset_create(struct pvr_file *pvr_file,
struct drm_pvr_ioctl_create_hwrt_dataset_args *args)
{
struct pvr_hwrt_dataset *hwrt;
- int err;
+ int err, i = 0;
/* Create and fill out the kernel structure */
hwrt = kzalloc(sizeof(*hwrt), GFP_KERNEL);
@@ -466,35 +466,36 @@ pvr_hwrt_dataset_create(struct pvr_file *pvr_file,
if (!hwrt)
return ERR_PTR(-ENOMEM);
- kref_init(&hwrt->ref_count);
-
err = hwrt_init_kernel_structure(pvr_file, args, hwrt);
if (err < 0)
goto err_free;
err = hwrt_init_common_fw_structure(pvr_file, args, hwrt);
if (err < 0)
- goto err_free;
+ goto err_fini_kernel_structure;
- for (int i = 0; i < ARRAY_SIZE(hwrt->data); i++) {
+ for (; i < ARRAY_SIZE(hwrt->data); i++) {
err = hwrt_data_init_fw_structure(pvr_file, hwrt, args,
&args->rt_data_args[i],
&hwrt->data[i]);
- if (err < 0) {
- i--;
- /* Destroy already created structures. */
- for (; i >= 0; i--)
- hwrt_data_fini_fw_structure(hwrt, i);
- goto err_free;
- }
+ if (err < 0)
+ goto err_fini_data_structures;
hwrt->data[i].hwrt_dataset = hwrt;
}
+ kref_init(&hwrt->ref_count);
return hwrt;
+err_fini_data_structures:
+ while (--i >= 0)
+ hwrt_data_fini_fw_structure(hwrt, i);
+
+err_fini_kernel_structure:
+ hwrt_fini_kernel_structure(hwrt);
+
err_free:
- pvr_hwrt_dataset_put(hwrt);
+ kfree(hwrt);
return ERR_PTR(err);
}
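
The rewritten error handling follows the standard kernel unwind-ladder idiom: initialize resources in order, and on failure jump to the label that tears down exactly what has been set up so far, with kref_init() deferred until nothing can fail. The shape of the idiom in miniature (function names are illustrative):

int init_a(void);
int init_b(void);
void fini_a(void);

static int setup(void)
{
	int err;

	err = init_a();
	if (err)
		return err;

	err = init_b();
	if (err)
		goto err_fini_a;

	/* ... further steps, each adding one more unwind label ... */

	return 0;

err_fini_a:
	fini_a();
	return err;
}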
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
index 04139da6c04d..78c2f3c6dce0 100644
--- a/drivers/gpu/drm/imagination/pvr_job.c
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -746,7 +746,7 @@ pvr_submit_jobs(struct pvr_device *pvr_dev, struct pvr_file *pvr_file,
if (err)
goto out_job_data_cleanup;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES, 0);
xa_init_flags(&signal_array, XA_FLAGS_ALLOC);
diff --git a/drivers/gpu/drm/imagination/pvr_mmu.c b/drivers/gpu/drm/imagination/pvr_mmu.c
index c8562bfc0dcd..4fe70610ed94 100644
--- a/drivers/gpu/drm/imagination/pvr_mmu.c
+++ b/drivers/gpu/drm/imagination/pvr_mmu.c
@@ -316,12 +316,14 @@ err_free_page:
static void
pvr_mmu_backing_page_fini(struct pvr_mmu_backing_page *page)
{
- struct device *dev = from_pvr_device(page->pvr_dev)->dev;
+ struct device *dev;
/* Do nothing if no allocation is present. */
if (!page->pvr_dev)
return;
+ dev = from_pvr_device(page->pvr_dev)->dev;
+
dma_unmap_page(dev, page->dma_addr, PVR_MMU_BACKING_PAGE_SIZE,
DMA_TO_DEVICE);
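
The reordering above matters because the removed initializer dereferenced page->pvr_dev before the "no allocation present" guard could return. The safe shape, in miniature (names are illustrative):

struct pvr_dev;
struct device;

struct backing_page {
	struct pvr_dev *pvr_dev;	/* NULL when nothing is allocated */
};

struct device *pvr_dev_to_device(struct pvr_dev *pvr_dev);

static void backing_page_fini(struct backing_page *page)
{
	struct device *dev;

	if (!page->pvr_dev)	/* guard first... */
		return;

	dev = pvr_dev_to_device(page->pvr_dev);	/* ...dereference later */
	(void)dev;	/* unmap and free against dev here */
}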
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index f42345fbe4bf..e59517ba039e 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -225,7 +225,7 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
u64 device_addr, u64 size)
{
struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
- const bool is_user = vm_ctx == vm_ctx->pvr_dev->kernel_vm_ctx;
+ const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx;
const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
struct sg_table *sgt;
u64 offset_plus_size;
@@ -556,23 +556,12 @@ pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
if (!vm_ctx)
return ERR_PTR(-ENOMEM);
- drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);
-
vm_ctx->pvr_dev = pvr_dev;
- kref_init(&vm_ctx->ref_count);
- mutex_init(&vm_ctx->lock);
-
- drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
- is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
- 0, &pvr_dev->base, &vm_ctx->dummy_gem,
- 0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);
vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
- err = PTR_ERR_OR_ZERO(&vm_ctx->mmu_ctx);
- if (err) {
- vm_ctx->mmu_ctx = NULL;
- goto err_put_ctx;
- }
+ err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
+ if (err)
+ goto err_free;
if (is_userspace_context) {
err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext),
@@ -583,13 +572,22 @@ pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
goto err_page_table_destroy;
}
+ drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);
+ drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
+ is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
+ 0, &pvr_dev->base, &vm_ctx->dummy_gem,
+ 0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);
+
+ mutex_init(&vm_ctx->lock);
+ kref_init(&vm_ctx->ref_count);
+
return vm_ctx;
err_page_table_destroy:
pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
-err_put_ctx:
- pvr_vm_context_put(vm_ctx);
+err_free:
+ kfree(vm_ctx);
return ERR_PTR(err);
}
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
index 2bc7181a4c3e..b7fef3c797e6 100644
--- a/drivers/gpu/drm/imagination/pvr_vm_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -152,8 +152,8 @@ pvr_vm_mips_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
u64 end;
u32 cache_policy;
u32 pte_flags;
- u32 start_pfn;
- u32 end_pfn;
+ s32 start_pfn;
+ s32 end_pfn;
s32 pfn;
int err;
@@ -201,7 +201,7 @@ pvr_vm_mips_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
return 0;
err_unmap_pages:
- for (; pfn >= start_pfn; pfn--)
+ while (--pfn >= start_pfn)
WRITE_ONCE(mips_data->pt[pfn], 0);
pvr_mmu_flush_request_all(pvr_dev);
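
The pfn type change above guards the unwind loop against unsigned wraparound: with a u32 counter and start_pfn == 0, decrementing past zero wraps to UINT32_MAX and the >= test never goes false. A standalone demonstration of the fixed shape (values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t start_pfn = 0;
	int32_t pfn = 3;	/* index of the entry that failed */

	/* With unsigned types, "for (; pfn >= start_pfn; pfn--)" would
	 * wrap at zero and loop forever when start_pfn is 0. Signed
	 * types plus pre-decrement also skip the entry that failed.
	 */
	while (--pfn >= start_pfn)
		printf("unmapping pfn %d\n", pfn);

	return 0;
}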
diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c
index 891d5cd5019a..8389f2d7d021 100644
--- a/drivers/gpu/drm/lima/lima_ctx.c
+++ b/drivers/gpu/drm/lima/lima_ctx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+#include <linux/pid.h>
#include <linux/slab.h>
#include "lima_device.h"
diff --git a/drivers/gpu/drm/loongson/lsdc_i2c.c b/drivers/gpu/drm/loongson/lsdc_i2c.c
index 9625d0b1d0b4..ce90c25536d2 100644
--- a/drivers/gpu/drm/loongson/lsdc_i2c.c
+++ b/drivers/gpu/drm/loongson/lsdc_i2c.c
@@ -154,7 +154,6 @@ int lsdc_create_i2c_chan(struct drm_device *ddev,
adapter = &li2c->adapter;
adapter->algo_data = &li2c->bit;
adapter->owner = THIS_MODULE;
- adapter->class = I2C_CLASS_DDC;
adapter->dev.parent = ddev->dev;
adapter->nr = -1;
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index d4d193f60271..5e4436403b8d 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -16,7 +16,8 @@ mediatek-drm-y := mtk_disp_aal.o \
mtk_dsi.o \
mtk_dpi.o \
mtk_ethdr.o \
- mtk_mdp_rdma.o
+ mtk_mdp_rdma.o \
+ mtk_padding.o
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index f47f417d8ba6..8519e9bade36 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -185,7 +185,6 @@ static int mtk_cec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_cec *cec;
- struct resource *res;
int ret;
cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
@@ -195,8 +194,7 @@ static int mtk_cec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cec);
spin_lock_init(&cec->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cec->regs = devm_ioremap_resource(dev, res);
+ cec->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cec->regs)) {
ret = PTR_ERR(cec->regs);
dev_err(dev, "Failed to ioremap cec: %d\n", ret);
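
This mtk_cec change, repeated in the aal and ccorr probes below, folds the two-step resource lookup and mapping into a single devm helper; the two forms are equivalent. In outline (a sketch, not the full probe):

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *map_first_mem(struct platform_device *pdev)
{
	/* Equivalent to:
	 *   res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   regs = devm_ioremap_resource(&pdev->dev, res);
	 * Returns an ERR_PTR() on failure, so callers test with IS_ERR().
	 */
	return devm_platform_ioremap_resource(pdev, 0);
}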
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_aal.c b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
index 2209159d8855..40fe403086c3 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_aal.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
@@ -168,7 +168,6 @@ static int mtk_disp_aal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_aal *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -181,8 +180,7 @@ static int mtk_disp_aal_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs)) {
dev_err(dev, "failed to ioremap aal\n");
return PTR_ERR(priv->regs);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
index 4234ff7485e8..465cddce0d32 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
@@ -153,7 +153,6 @@ static int mtk_disp_ccorr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_ccorr *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -166,8 +165,7 @@ static int mtk_disp_ccorr_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs)) {
dev_err(dev, "failed to ioremap ccorr\n");
return PTR_ERR(priv->regs);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
index 1311562d25cc..74fa56339383 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -110,6 +110,8 @@ void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev,
unsigned int next);
void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev,
unsigned int next);
+int mtk_ovl_adaptor_power_on(struct device *dev);
+void mtk_ovl_adaptor_power_off(struct device *dev);
int mtk_ovl_adaptor_clk_enable(struct device *dev);
void mtk_ovl_adaptor_clk_disable(struct device *dev);
void mtk_ovl_adaptor_config(struct device *dev, unsigned int w,
@@ -151,6 +153,8 @@ void mtk_rdma_disable_vblank(struct device *dev);
const u32 *mtk_rdma_get_formats(struct device *dev);
size_t mtk_rdma_get_num_formats(struct device *dev);
+int mtk_mdp_rdma_power_on(struct device *dev);
+void mtk_mdp_rdma_power_off(struct device *dev);
int mtk_mdp_rdma_clk_enable(struct device *dev);
void mtk_mdp_rdma_clk_disable(struct device *dev);
void mtk_mdp_rdma_start(struct device *dev, struct cmdq_pkt *cmdq_pkt);
@@ -160,4 +164,8 @@ void mtk_mdp_rdma_config(struct device *dev, struct mtk_mdp_rdma_cfg *cfg,
const u32 *mtk_mdp_rdma_get_formats(struct device *dev);
size_t mtk_mdp_rdma_get_num_formats(struct device *dev);
+int mtk_padding_clk_enable(struct device *dev);
+void mtk_padding_clk_disable(struct device *dev);
+void mtk_padding_start(struct device *dev);
+void mtk_padding_stop(struct device *dev);
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
index f81dc34c9c3e..c1bc8b00d938 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -203,7 +203,7 @@ void mtk_gamma_set(struct device *dev, struct drm_crtc_state *state)
/* Disable RELAY mode to pass the processed image */
cfg_val &= ~GAMMA_RELAY_MODE;
- cfg_val = readl(gamma->regs + DISP_GAMMA_CFG);
+ writel(cfg_val, gamma->regs + DISP_GAMMA_CFG);
}
void mtk_gamma_config(struct device *dev, unsigned int w,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
index e525a6b9e5b0..22f768d923d5 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
@@ -103,7 +103,7 @@ void mtk_merge_stop_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt)
mtk_ddp_write(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CTRL);
- if (priv->async_clk)
+ if (!cmdq_pkt && priv->async_clk)
reset_control_reset(priv->reset_ctl);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index 3fdef3ad4ffd..12a37f740bf4 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -27,13 +27,14 @@
#define MTK_OVL_ADAPTOR_LAYER_NUM 4
enum mtk_ovl_adaptor_comp_type {
- OVL_ADAPTOR_TYPE_RDMA = 0,
- OVL_ADAPTOR_TYPE_MERGE,
OVL_ADAPTOR_TYPE_ETHDR,
+ OVL_ADAPTOR_TYPE_MDP_RDMA,
+ OVL_ADAPTOR_TYPE_MERGE,
OVL_ADAPTOR_TYPE_NUM,
};
enum mtk_ovl_adaptor_comp_id {
+ OVL_ADAPTOR_ETHDR0,
OVL_ADAPTOR_MDP_RDMA0,
OVL_ADAPTOR_MDP_RDMA1,
OVL_ADAPTOR_MDP_RDMA2,
@@ -46,13 +47,14 @@ enum mtk_ovl_adaptor_comp_id {
OVL_ADAPTOR_MERGE1,
OVL_ADAPTOR_MERGE2,
OVL_ADAPTOR_MERGE3,
- OVL_ADAPTOR_ETHDR0,
OVL_ADAPTOR_ID_MAX
};
struct ovl_adaptor_comp_match {
enum mtk_ovl_adaptor_comp_type type;
+ enum mtk_ddp_comp_id comp_id;
int alias_id;
+ const struct mtk_ddp_comp_funcs *funcs;
};
struct mtk_disp_ovl_adaptor {
@@ -62,25 +64,44 @@ struct mtk_disp_ovl_adaptor {
};
static const char * const private_comp_stem[OVL_ADAPTOR_TYPE_NUM] = {
- [OVL_ADAPTOR_TYPE_RDMA] = "vdo1-rdma",
- [OVL_ADAPTOR_TYPE_MERGE] = "merge",
[OVL_ADAPTOR_TYPE_ETHDR] = "ethdr",
+ [OVL_ADAPTOR_TYPE_MDP_RDMA] = "vdo1-rdma",
+ [OVL_ADAPTOR_TYPE_MERGE] = "merge",
+};
+
+static const struct mtk_ddp_comp_funcs ethdr = {
+ .clk_enable = mtk_ethdr_clk_enable,
+ .clk_disable = mtk_ethdr_clk_disable,
+ .start = mtk_ethdr_start,
+ .stop = mtk_ethdr_stop,
+};
+
+static const struct mtk_ddp_comp_funcs merge = {
+ .clk_enable = mtk_merge_clk_enable,
+ .clk_disable = mtk_merge_clk_disable,
+};
+
+static const struct mtk_ddp_comp_funcs rdma = {
+ .power_on = mtk_mdp_rdma_power_on,
+ .power_off = mtk_mdp_rdma_power_off,
+ .clk_enable = mtk_mdp_rdma_clk_enable,
+ .clk_disable = mtk_mdp_rdma_clk_disable,
};
static const struct ovl_adaptor_comp_match comp_matches[OVL_ADAPTOR_ID_MAX] = {
- [OVL_ADAPTOR_MDP_RDMA0] = { OVL_ADAPTOR_TYPE_RDMA, 0 },
- [OVL_ADAPTOR_MDP_RDMA1] = { OVL_ADAPTOR_TYPE_RDMA, 1 },
- [OVL_ADAPTOR_MDP_RDMA2] = { OVL_ADAPTOR_TYPE_RDMA, 2 },
- [OVL_ADAPTOR_MDP_RDMA3] = { OVL_ADAPTOR_TYPE_RDMA, 3 },
- [OVL_ADAPTOR_MDP_RDMA4] = { OVL_ADAPTOR_TYPE_RDMA, 4 },
- [OVL_ADAPTOR_MDP_RDMA5] = { OVL_ADAPTOR_TYPE_RDMA, 5 },
- [OVL_ADAPTOR_MDP_RDMA6] = { OVL_ADAPTOR_TYPE_RDMA, 6 },
- [OVL_ADAPTOR_MDP_RDMA7] = { OVL_ADAPTOR_TYPE_RDMA, 7 },
- [OVL_ADAPTOR_MERGE0] = { OVL_ADAPTOR_TYPE_MERGE, 1 },
- [OVL_ADAPTOR_MERGE1] = { OVL_ADAPTOR_TYPE_MERGE, 2 },
- [OVL_ADAPTOR_MERGE2] = { OVL_ADAPTOR_TYPE_MERGE, 3 },
- [OVL_ADAPTOR_MERGE3] = { OVL_ADAPTOR_TYPE_MERGE, 4 },
- [OVL_ADAPTOR_ETHDR0] = { OVL_ADAPTOR_TYPE_ETHDR, 0 },
+ [OVL_ADAPTOR_ETHDR0] = { OVL_ADAPTOR_TYPE_ETHDR, DDP_COMPONENT_ETHDR_MIXER, 0, &ethdr },
+ [OVL_ADAPTOR_MDP_RDMA0] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA0, 0, &rdma },
+ [OVL_ADAPTOR_MDP_RDMA1] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA1, 1, &rdma },
+ [OVL_ADAPTOR_MDP_RDMA2] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA2, 2, &rdma },
+ [OVL_ADAPTOR_MDP_RDMA3] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA3, 3, &rdma },
+ [OVL_ADAPTOR_MDP_RDMA4] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA4, 4, &rdma },
+ [OVL_ADAPTOR_MDP_RDMA5] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA5, 5, &rdma },
+ [OVL_ADAPTOR_MDP_RDMA6] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA6, 6, &rdma },
+ [OVL_ADAPTOR_MDP_RDMA7] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA7, 7, &rdma },
+ [OVL_ADAPTOR_MERGE0] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE1, 1, &merge },
+ [OVL_ADAPTOR_MERGE1] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE2, 2, &merge },
+ [OVL_ADAPTOR_MERGE2] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE3, 3, &merge },
+ [OVL_ADAPTOR_MERGE3] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE4, 4, &merge },
};
void mtk_ovl_adaptor_layer_config(struct device *dev, unsigned int idx,
@@ -172,68 +193,112 @@ void mtk_ovl_adaptor_config(struct device *dev, unsigned int w,
void mtk_ovl_adaptor_start(struct device *dev)
{
+ int i;
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
- mtk_ethdr_start(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
+ for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
+ if (!ovl_adaptor->ovl_adaptor_comp[i] ||
+ !comp_matches[i].funcs->start)
+ continue;
+
+ comp_matches[i].funcs->start(ovl_adaptor->ovl_adaptor_comp[i]);
+ }
}
void mtk_ovl_adaptor_stop(struct device *dev)
{
+ int i;
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
- mtk_ethdr_stop(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
+ for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
+ if (!ovl_adaptor->ovl_adaptor_comp[i] ||
+ !comp_matches[i].funcs->stop)
+ continue;
+
+ comp_matches[i].funcs->stop(ovl_adaptor->ovl_adaptor_comp[i]);
+ }
}
-int mtk_ovl_adaptor_clk_enable(struct device *dev)
+/**
+ * power_off - Power off the devices in OVL adaptor
+ * @dev: Device to be powered off
+ * @num: Number of devices to power off
+ *
+ * Calls the .power_off() ovl_adaptor component callback if it is present.
+ */
+static inline void power_off(struct device *dev, int num)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
- struct device *comp;
- int ret;
int i;
- for (i = 0; i < OVL_ADAPTOR_MERGE0; i++) {
- comp = ovl_adaptor->ovl_adaptor_comp[i];
- ret = pm_runtime_get_sync(comp);
+ if (num > OVL_ADAPTOR_ID_MAX)
+ num = OVL_ADAPTOR_ID_MAX;
+
+ for (i = num - 1; i >= 0; i--) {
+ if (!ovl_adaptor->ovl_adaptor_comp[i] ||
+ !comp_matches[i].funcs->power_off)
+ continue;
+
+ comp_matches[i].funcs->power_off(ovl_adaptor->ovl_adaptor_comp[i]);
+ }
+}
+
+/**
+ * mtk_ovl_adaptor_power_on - Power on the devices in OVL adaptor
+ * @dev: Device to be powered on
+ *
+ * Unlike OVL, the OVL adaptor is a pseudo device that is not defined
+ * in the device tree, so the pm_runtime_resume_and_get() called by
+ * .atomic_enable() powers on none of the devices in the OVL adaptor.
+ * We therefore have to implement a function to do the job instead.
+ *
+ * Return: Zero on success or a negative error code on failure.
+ */
+int mtk_ovl_adaptor_power_on(struct device *dev)
+{
+ struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
+ int i, ret;
+
+ for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
+ if (!ovl_adaptor->ovl_adaptor_comp[i] ||
+ !comp_matches[i].funcs->power_on)
+ continue;
+
+ ret = comp_matches[i].funcs->power_on(ovl_adaptor->ovl_adaptor_comp[i]);
if (ret < 0) {
dev_err(dev, "Failed to enable power domain %d, err %d\n", i, ret);
- goto pwr_err;
+ power_off(dev, i);
+ return ret;
}
}
+ return 0;
+}
+
+void mtk_ovl_adaptor_power_off(struct device *dev)
+{
+ power_off(dev, OVL_ADAPTOR_ID_MAX);
+}
+
+int mtk_ovl_adaptor_clk_enable(struct device *dev)
+{
+ struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
+ struct device *comp;
+ int ret;
+ int i;
for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
comp = ovl_adaptor->ovl_adaptor_comp[i];
-
- if (i < OVL_ADAPTOR_MERGE0)
- ret = mtk_mdp_rdma_clk_enable(comp);
- else if (i < OVL_ADAPTOR_ETHDR0)
- ret = mtk_merge_clk_enable(comp);
- else
- ret = mtk_ethdr_clk_enable(comp);
+ if (!comp || !comp_matches[i].funcs->clk_enable)
+ continue;
+ ret = comp_matches[i].funcs->clk_enable(comp);
if (ret) {
dev_err(dev, "Failed to enable clock %d, err %d\n", i, ret);
- goto clk_err;
+			while (--i >= 0)
+				if (ovl_adaptor->ovl_adaptor_comp[i])
+					comp_matches[i].funcs->clk_disable(
+						ovl_adaptor->ovl_adaptor_comp[i]);
+ return ret;
}
}
-
- return ret;
-
-clk_err:
- while (--i >= 0) {
- comp = ovl_adaptor->ovl_adaptor_comp[i];
- if (i < OVL_ADAPTOR_MERGE0)
- mtk_mdp_rdma_clk_disable(comp);
- else if (i < OVL_ADAPTOR_ETHDR0)
- mtk_merge_clk_disable(comp);
- else
- mtk_ethdr_clk_disable(comp);
- }
- i = OVL_ADAPTOR_MERGE0;
-
-pwr_err:
- while (--i >= 0)
- pm_runtime_put(ovl_adaptor->ovl_adaptor_comp[i]);
-
- return ret;
+ return 0;
}
void mtk_ovl_adaptor_clk_disable(struct device *dev)
@@ -244,15 +309,11 @@ void mtk_ovl_adaptor_clk_disable(struct device *dev)
for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
comp = ovl_adaptor->ovl_adaptor_comp[i];
-
- if (i < OVL_ADAPTOR_MERGE0) {
- mtk_mdp_rdma_clk_disable(comp);
+ if (!comp || !comp_matches[i].funcs->clk_disable)
+ continue;
+ comp_matches[i].funcs->clk_disable(comp);
+ if (i < OVL_ADAPTOR_MERGE0)
pm_runtime_put(comp);
- } else if (i < OVL_ADAPTOR_ETHDR0) {
- mtk_merge_clk_disable(comp);
- } else {
- mtk_ethdr_clk_disable(comp);
- }
}
}
@@ -314,40 +375,31 @@ size_t mtk_ovl_adaptor_get_num_formats(struct device *dev)
void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex)
{
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA0);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA1);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA2);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA3);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA4);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA5);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA6);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA7);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE1);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE2);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE3);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE4);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_ETHDR_MIXER);
+ int i;
+ struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
+
+ for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
+ if (!ovl_adaptor->ovl_adaptor_comp[i])
+ continue;
+ mtk_mutex_add_comp(mutex, comp_matches[i].comp_id);
+ }
}
void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex)
{
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA0);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA1);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA2);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA3);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA4);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA5);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA6);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA7);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE1);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE2);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE3);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE4);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_ETHDR_MIXER);
+ int i;
+ struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
+
+ for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
+ if (!ovl_adaptor->ovl_adaptor_comp[i])
+ continue;
+ mtk_mutex_remove_comp(mutex, comp_matches[i].comp_id);
+ }
}
void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev, unsigned int next)
{
+ mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2);
@@ -355,11 +407,11 @@ void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev, unsig
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER);
- mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next);
}
void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev, unsigned int next)
{
+ mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2);
@@ -367,7 +419,6 @@ void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev, un
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER);
- mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next);
}
static int ovl_adaptor_comp_get_id(struct device *dev, struct device_node *node,
@@ -386,17 +437,10 @@ static int ovl_adaptor_comp_get_id(struct device *dev, struct device_node *node,
}
static const struct of_device_id mtk_ovl_adaptor_comp_dt_ids[] = {
- {
- .compatible = "mediatek,mt8195-vdo1-rdma",
- .data = (void *)OVL_ADAPTOR_TYPE_RDMA,
- }, {
- .compatible = "mediatek,mt8195-disp-merge",
- .data = (void *)OVL_ADAPTOR_TYPE_MERGE,
- }, {
- .compatible = "mediatek,mt8195-disp-ethdr",
- .data = (void *)OVL_ADAPTOR_TYPE_ETHDR,
- },
- {},
+ { .compatible = "mediatek,mt8195-disp-ethdr", .data = (void *)OVL_ADAPTOR_TYPE_ETHDR },
+ { .compatible = "mediatek,mt8195-disp-merge", .data = (void *)OVL_ADAPTOR_TYPE_MERGE },
+ { .compatible = "mediatek,mt8195-vdo1-rdma", .data = (void *)OVL_ADAPTOR_TYPE_MDP_RDMA },
+ { /* sentinel */ }
};
static int compare_of(struct device *dev, void *data)
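The add/remove loops above walk a per-component match table indexed up to OVL_ADAPTOR_ID_MAX. A minimal sketch of its assumed shape (the real comp_matches[] is defined earlier in mtk_disp_ovl_adaptor.c; the entry names below are illustrative, not the merged table):

	struct ovl_adaptor_comp_match {
		enum mtk_ovl_adaptor_comp_type type;	/* MDP_RDMA, MERGE or ETHDR */
		enum mtk_ddp_comp_id comp_id;		/* id used for mutex/mmsys routing */
		int alias_id;				/* instance index within the type */
	};

	static const struct ovl_adaptor_comp_match comp_matches[OVL_ADAPTOR_ID_MAX] = {
		[OVL_ADAPTOR_MDP_RDMA0] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA0, 0 },
		[OVL_ADAPTOR_MERGE0]    = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE1, 1 },
		[OVL_ADAPTOR_ETHDR0]    = { OVL_ADAPTOR_TYPE_ETHDR, DDP_COMPONENT_ETHDR_MIXER, 0 },
		/* ...remaining RDMA/MERGE instances... */
	};

Skipping NULL ovl_adaptor_comp[i] slots means only components that actually probed get attached to the mutex, instead of unconditionally listing all thirteen.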
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index e4c16ba9902d..2136a596efa1 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2818,3 +2818,4 @@ MODULE_AUTHOR("Markus Schneider-Pargmann <msp@baylibre.com>");
MODULE_AUTHOR("Bo-Chen Chen <rex-bc.chen@mediatek.com>");
MODULE_DESCRIPTION("MediaTek DisplayPort Driver");
MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: phy_mtk_dp");
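The soft dependency is a hint for modprobe to load the DisplayPort PHY module (phy_mtk_dp) before mtk_dp, so the PHY provider is already registered when the DP driver probes rather than forcing repeated probe deferrals.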
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 4e3d9f7b4d8c..beb7d9d08e97 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -966,20 +966,6 @@ static const struct mtk_dpi_conf mt8186_conf = {
.csc_enable_bit = CSC_ENABLE,
};
-static const struct mtk_dpi_conf mt8188_dpintf_conf = {
- .cal_factor = mt8195_dpintf_calculate_factor,
- .max_clock_khz = 600000,
- .output_fmts = mt8195_output_fmts,
- .num_output_fmts = ARRAY_SIZE(mt8195_output_fmts),
- .pixels_per_iter = 4,
- .input_2pixel = false,
- .dimension_mask = DPINTF_HPW_MASK,
- .hvsize_mask = DPINTF_HSIZE_MASK,
- .channel_swap_shift = DPINTF_CH_SWAP,
- .yuv422_en_bit = DPINTF_YUV422_EN,
- .csc_enable_bit = DPINTF_CSC_ENABLE,
-};
-
static const struct mtk_dpi_conf mt8192_conf = {
.cal_factor = mt8183_calculate_factor,
.reg_h_fre_con = 0xe0,
@@ -1103,7 +1089,7 @@ static const struct of_device_id mtk_dpi_of_ids[] = {
{ .compatible = "mediatek,mt8173-dpi", .data = &mt8173_conf },
{ .compatible = "mediatek,mt8183-dpi", .data = &mt8183_conf },
{ .compatible = "mediatek,mt8186-dpi", .data = &mt8186_conf },
- { .compatible = "mediatek,mt8188-dp-intf", .data = &mt8188_dpintf_conf },
+ { .compatible = "mediatek,mt8188-dp-intf", .data = &mt8195_dpintf_conf },
{ .compatible = "mediatek,mt8192-dpi", .data = &mt8192_conf },
{ .compatible = "mediatek,mt8195-dp-intf", .data = &mt8195_dpintf_conf },
{ /* sentinel */ },
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index c277b9fae950..c729af3b9822 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -721,7 +721,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
- ret = pm_runtime_resume_and_get(comp->dev);
+ ret = mtk_ddp_comp_power_on(comp);
if (ret < 0) {
DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret);
return;
@@ -731,7 +731,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
ret = mtk_crtc_ddp_hw_init(mtk_crtc);
if (ret) {
- pm_runtime_put(comp->dev);
+ mtk_ddp_comp_power_off(comp);
return;
}
@@ -744,7 +744,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
- int i, ret;
+ int i;
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
if (!mtk_crtc->enabled)
@@ -774,9 +774,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
mtk_crtc_ddp_hw_fini(mtk_crtc);
- ret = pm_runtime_put(comp->dev);
- if (ret < 0)
- DRM_DEV_ERROR(comp->dev, "Failed to disable power domain: %d\n", ret);
+ mtk_ddp_comp_power_off(comp);
mtk_crtc->enabled = false;
}
@@ -788,6 +786,7 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
crtc);
struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ unsigned long flags;
if (mtk_crtc->event && mtk_crtc_state->base.event)
DRM_ERROR("new event while there is still a pending event\n");
@@ -795,7 +794,11 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
if (mtk_crtc_state->base.event) {
mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
mtk_crtc->event = mtk_crtc_state->base.event;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
mtk_crtc_state->base.event = NULL;
}
}
@@ -921,7 +924,14 @@ static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc)
{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_drm_crtc *mtk_crtc = NULL;
+
+ if (!crtc)
+ return NULL;
+
+ mtk_crtc = to_mtk_crtc(crtc);
+ if (!mtk_crtc)
+ return NULL;
return mtk_crtc->dma_dev;
}
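For context, an abridged sketch of the consumer that the new event_lock usage in mtk_drm_crtc_atomic_begin() pairs with: the vblank irq path takes the same lock before completing the pending event, so the assignment above can no longer race it (body condensed from the driver):

	static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
	{
		struct drm_crtc *crtc = &mtk_crtc->base;
		unsigned long flags;

		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		if (mtk_crtc->event) {
			drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
			drm_crtc_vblank_put(crtc);
			mtk_crtc->event = NULL;
		}
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}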
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 3046c0409353..a9b5a21cde2d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -398,6 +398,8 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = {
};
static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = {
+ .power_on = mtk_ovl_adaptor_power_on,
+ .power_off = mtk_ovl_adaptor_power_off,
.clk_enable = mtk_ovl_adaptor_clk_enable,
.clk_disable = mtk_ovl_adaptor_clk_disable,
.config = mtk_ovl_adaptor_config,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 4bae55bdb034..15b2eafff438 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -7,6 +7,7 @@
#define MTK_DRM_DDP_COMP_H
#include <linux/io.h>
+#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
@@ -46,6 +47,8 @@ enum mtk_ddp_comp_type {
struct mtk_ddp_comp;
struct cmdq_pkt;
struct mtk_ddp_comp_funcs {
+ int (*power_on)(struct device *dev);
+ void (*power_off)(struct device *dev);
int (*clk_enable)(struct device *dev);
void (*clk_disable)(struct device *dev);
void (*config)(struct device *dev, unsigned int w,
@@ -92,6 +95,23 @@ struct mtk_ddp_comp {
const struct mtk_ddp_comp_funcs *funcs;
};
+static inline int mtk_ddp_comp_power_on(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->power_on)
+ return comp->funcs->power_on(comp->dev);
+ else
+ return pm_runtime_resume_and_get(comp->dev);
+}
+
+static inline void mtk_ddp_comp_power_off(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->power_off)
+ comp->funcs->power_off(comp->dev);
+ else
+ pm_runtime_put(comp->dev);
+}
+
static inline int mtk_ddp_comp_clk_enable(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->clk_enable)
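A minimal sketch of the call pattern the new helpers enable (example_enable() is hypothetical; the real caller is mtk_drm_crtc_atomic_enable() above). Components with a dedicated power sequence supply .power_on/.power_off; everyone else transparently falls back to runtime PM:

	static int example_enable(struct mtk_ddp_comp *comp)
	{
		int ret;

		/* custom hook if the component set one, else pm_runtime_resume_and_get() */
		ret = mtk_ddp_comp_power_on(comp);
		if (ret < 0)
			return ret;

		ret = mtk_ddp_comp_clk_enable(comp);
		if (ret) {
			/* custom hook if set, else pm_runtime_put() */
			mtk_ddp_comp_power_off(comp);
			return ret;
		}

		return 0;
	}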
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 2dfaa613276a..14a1e0157cc4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -5,7 +5,6 @@
*/
#include <linux/component.h>
-#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -443,6 +442,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
struct mtk_drm_private *private = drm->dev_private;
struct mtk_drm_private *priv_n;
struct device *dma_dev = NULL;
+ struct drm_crtc *crtc;
int ret, i, j;
if (drm_firmware_drivers_only())
@@ -519,7 +519,9 @@ static int mtk_drm_kms_init(struct drm_device *drm)
}
/* Use OVL device for all DMA memory allocations */
- dma_dev = mtk_drm_crtc_dma_dev_get(drm_crtc_from_index(drm, 0));
+ crtc = drm_crtc_from_index(drm, 0);
+ if (crtc)
+ dma_dev = mtk_drm_crtc_dma_dev_get(crtc);
if (!dma_dev) {
ret = -ENODEV;
dev_err(drm->dev, "Need at least one OVL device\n");
@@ -608,9 +610,6 @@ static int mtk_drm_bind(struct device *dev)
struct drm_device *drm;
int ret, i;
- if (!iommu_present(&platform_bus_type))
- return -EPROBE_DEFER;
-
pdev = of_find_device_by_node(private->mutex_node);
if (!pdev) {
dev_err(dev, "Waiting for disp-mutex device %pOF\n",
@@ -1000,6 +999,7 @@ static struct platform_driver * const mtk_drm_drivers[] = {
&mtk_dsi_driver,
&mtk_ethdr_driver,
&mtk_mdp_rdma_driver,
+ &mtk_padding_driver,
};
static int __init mtk_drm_init(void)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 6f98fff4f1a4..33fadb08dc1c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -77,5 +77,5 @@ extern struct platform_driver mtk_dpi_driver;
extern struct platform_driver mtk_dsi_driver;
extern struct platform_driver mtk_ethdr_driver;
extern struct platform_driver mtk_mdp_rdma_driver;
-
+extern struct platform_driver mtk_padding_driver;
#endif /* MTK_DRM_DRV_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
index d675c954befe..54e46e440e0f 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -297,7 +297,6 @@ static int mtk_hdmi_ddc_probe(struct platform_device *pdev)
strscpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name));
ddc->adap.owner = THIS_MODULE;
- ddc->adap.class = I2C_CLASS_DDC;
ddc->adap.algo = &mtk_hdmi_ddc_algorithm;
ddc->adap.retries = 3;
ddc->adap.dev.of_node = dev->of_node;
diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
index c3adaeefd551..ee9ce9b6d078 100644
--- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
@@ -242,12 +242,27 @@ size_t mtk_mdp_rdma_get_num_formats(struct device *dev)
return ARRAY_SIZE(formats);
}
+int mtk_mdp_rdma_power_on(struct device *dev)
+{
+ int ret = pm_runtime_resume_and_get(dev);
+
+ if (ret < 0) {
+ dev_err(dev, "Failed to power on: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+void mtk_mdp_rdma_power_off(struct device *dev)
+{
+ pm_runtime_put(dev);
+}
+
int mtk_mdp_rdma_clk_enable(struct device *dev)
{
struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev);
- clk_prepare_enable(rdma->clk);
- return 0;
+ return clk_prepare_enable(rdma->clk);
}
void mtk_mdp_rdma_clk_disable(struct device *dev)
diff --git a/drivers/gpu/drm/mediatek/mtk_padding.c b/drivers/gpu/drm/mediatek/mtk_padding.c
new file mode 100644
index 000000000000..0d6451c149b6
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_padding.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#include "mtk_disp_drv.h"
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define PADDING_CONTROL_REG 0x00
+#define PADDING_BYPASS BIT(0)
+#define PADDING_ENABLE BIT(1)
+#define PADDING_PIC_SIZE_REG 0x04
+#define PADDING_H_REG 0x08 /* horizontal */
+#define PADDING_V_REG 0x0c /* vertical */
+#define PADDING_COLOR_REG 0x10
+
+/**
+ * struct mtk_padding - Basic information of the Padding
+ * @clk: Clock of the module
+ * @reg: Virtual address of the Padding for CPU to access
+ * @cmdq_reg: CMDQ setting of the Padding
+ *
+ * Every Padding has its own clock source, register base, and CMDQ
+ * settings, so these per-instance differences are stored together here.
+ */
+struct mtk_padding {
+ struct clk *clk;
+ void __iomem *reg;
+ struct cmdq_client_reg cmdq_reg;
+};
+
+int mtk_padding_clk_enable(struct device *dev)
+{
+ struct mtk_padding *padding = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(padding->clk);
+}
+
+void mtk_padding_clk_disable(struct device *dev)
+{
+ struct mtk_padding *padding = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(padding->clk);
+}
+
+void mtk_padding_start(struct device *dev)
+{
+ struct mtk_padding *padding = dev_get_drvdata(dev);
+
+ writel(PADDING_ENABLE | PADDING_BYPASS,
+ padding->reg + PADDING_CONTROL_REG);
+
+ /*
+	 * Notice that even when the padding is in bypass mode,
+	 * all the settings must still be cleared to 0, or
+	 * undefined behavior may occur.
+ */
+ writel(0, padding->reg + PADDING_PIC_SIZE_REG);
+ writel(0, padding->reg + PADDING_H_REG);
+ writel(0, padding->reg + PADDING_V_REG);
+ writel(0, padding->reg + PADDING_COLOR_REG);
+}
+
+void mtk_padding_stop(struct device *dev)
+{
+ struct mtk_padding *padding = dev_get_drvdata(dev);
+
+ writel(0, padding->reg + PADDING_CONTROL_REG);
+}
+
+static int mtk_padding_bind(struct device *dev, struct device *master, void *data)
+{
+ return 0;
+}
+
+static void mtk_padding_unbind(struct device *dev, struct device *master, void *data)
+{
+}
+
+static const struct component_ops mtk_padding_component_ops = {
+ .bind = mtk_padding_bind,
+ .unbind = mtk_padding_unbind,
+};
+
+static int mtk_padding_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_padding *priv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk))
+		return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to get clk\n");
+
+ priv->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->reg)) {
+ dev_err(dev, "failed to do ioremap\n");
+ return PTR_ERR(priv->reg);
+ }
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret) {
+ dev_err(dev, "failed to get gce client reg\n");
+ return ret;
+ }
+#endif
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+	ret = component_add(dev, &mtk_padding_component_ops);
+	if (ret)
+		return dev_err_probe(dev, ret, "failed to add component\n");
+
+ return 0;
+}
+
+static int mtk_padding_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mtk_padding_component_ops);
+ return 0;
+}
+
+static const struct of_device_id mtk_padding_driver_dt_match[] = {
+ { .compatible = "mediatek,mt8188-disp-padding" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_padding_driver_dt_match);
+
+struct platform_driver mtk_padding_driver = {
+ .probe = mtk_padding_probe,
+ .remove = mtk_padding_remove,
+ .driver = {
+ .name = "mediatek-disp-padding",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_padding_driver_dt_match,
+ },
+};
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 57c7edcab602..765e49fd8911 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -392,6 +392,11 @@ void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
.destroy = drm_plane_cleanup, \
DRM_GEM_SHADOW_PLANE_FUNCS
+void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, const struct drm_format_info *format);
+void mgag200_crtc_set_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut);
+
enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state);
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
index bce267e0f7de..8d4538b71047 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -202,6 +202,11 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200er_reset_tagfifo(mdev);
+ if (crtc_state->gamma_lut)
+ mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ else
+ mgag200_crtc_set_gamma_linear(mdev, format);
+
mgag200_enable_display(mdev);
if (funcs->enable_vidrst)
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
index ac957f42abe1..56e6f986bff3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -203,6 +203,11 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200ev_set_hiprilvl(mdev);
+ if (crtc_state->gamma_lut)
+ mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ else
+ mgag200_crtc_set_gamma_linear(mdev, format);
+
mgag200_enable_display(mdev);
if (funcs->enable_vidrst)
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index bd6e573c9a1a..ff2b3c6622e7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -334,6 +334,11 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, format);
+ if (crtc_state->gamma_lut)
+ mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ else
+ mgag200_crtc_set_gamma_linear(mdev, format);
+
mgag200_enable_display(mdev);
if (funcs->enable_vidrst)
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 0c48bdf3e7f8..423eb302be7e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -106,7 +106,6 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c)
i2c->data = BIT(info->i2c.data_bit);
i2c->clock = BIT(info->i2c.clock_bit);
i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.class = I2C_CLASS_DDC;
i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index af3ce5a6a636..0f0d59938c3a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -28,8 +28,8 @@
* This file contains setup code for the CRTC.
*/
-static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
- const struct drm_format_info *format)
+void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
+ const struct drm_format_info *format)
{
int i;
@@ -65,9 +65,9 @@ static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
}
}
-static void mgag200_crtc_set_gamma(struct mga_device *mdev,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+void mgag200_crtc_set_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
int i;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 6309a857ca31..f202f26adab2 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -6,6 +6,7 @@ config DRM_MSM
depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
depends on COMMON_CLK
depends on IOMMU_SUPPORT
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
depends on QCOM_OCMEM || QCOM_OCMEM=n
depends on QCOM_LLCC || QCOM_LLCC=n
depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
@@ -16,6 +17,7 @@ config DRM_MSM
select DRM_DP_AUX_BUS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
+ select DRM_EXEC
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_BRIDGE
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 49671364fdcf..b1173128b5b9 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -63,6 +63,7 @@ msm-$(CONFIG_DRM_MSM_DPU) += \
disp/dpu1/dpu_encoder_phys_wb.o \
disp/dpu1/dpu_formats.o \
disp/dpu1/dpu_hw_catalog.o \
+ disp/dpu1/dpu_hw_cdm.o \
disp/dpu1/dpu_hw_ctl.o \
disp/dpu1/dpu_hw_dsc.o \
disp/dpu1/dpu_hw_dsc_1_2.o \
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index e5916c106796..c003f970189b 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -684,7 +684,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
- u32 regbit;
+ u32 hbb;
int ret;
gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
@@ -820,18 +820,15 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
- /* Set the highest bank bit */
- if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
- regbit = 2;
- else
- regbit = 1;
+ BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
+ hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
- gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7);
- gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1);
+ gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, hbb << 7);
+ gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, hbb << 1);
if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
adreno_is_a540(adreno_gpu))
- gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit);
+ gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, hbb);
/* Disable All flat shading optimization (ALLFLATOPTDIS) */
gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10));
@@ -1785,5 +1782,11 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
/* Set up the preemption specific bits and pieces for each ringbuffer */
a5xx_preempt_init(gpu);
+ /* Set the highest bank bit */
+ if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
+ adreno_gpu->ubwc_config.highest_bank_bit = 15;
+ else
+ adreno_gpu->ubwc_config.highest_bank_bit = 14;
+
return gpu;
}
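Sanity check: with hbb = highest_bank_bit - 13, the new per-GPU defaults reproduce the old hard-coded values exactly: a530/a540 (HBB 15) give hbb = 2 and every other a5xx (HBB 14) gives hbb = 1, so the TPL1/RB/UCHE register writes are unchanged by this refactor.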
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 7a0220d29a23..c0bc924cd302 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1270,87 +1270,92 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
}
-static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
+static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
/* Unknown, introduced with A650 family, related to UBWC mode/ver 4 */
- u32 rgb565_predicator = 0;
+ gpu->ubwc_config.rgb565_predicator = 0;
/* Unknown, introduced with A650 family */
- u32 uavflagprd_inv = 0;
+ gpu->ubwc_config.uavflagprd_inv = 0;
/* Whether the minimum access length is 64 bits */
- u32 min_acc_len = 0;
+ gpu->ubwc_config.min_acc_len = 0;
/* Entirely magic, per-GPU-gen value */
- u32 ubwc_mode = 0;
+ gpu->ubwc_config.ubwc_mode = 0;
/*
* The Highest Bank Bit value represents the bit of the highest DDR bank.
- * We then subtract 13 from it (13 is the minimum value allowed by hw) and
- * write the lowest two bits of the remaining value as hbb_lo and the
- * one above it as hbb_hi to the hardware. This should ideally use DRAM
- * type detection.
+ * This should ideally use DRAM type detection.
*/
- u32 hbb_hi = 0;
- u32 hbb_lo = 2;
- /* Unknown, introduced with A640/680 */
- u32 amsbc = 0;
+ gpu->ubwc_config.highest_bank_bit = 15;
- if (adreno_is_a610(adreno_gpu)) {
- /* HBB = 14 */
- hbb_lo = 1;
- min_acc_len = 1;
- ubwc_mode = 1;
+ if (adreno_is_a610(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 14;
+ gpu->ubwc_config.min_acc_len = 1;
+ gpu->ubwc_config.ubwc_mode = 1;
}
/* a618 is using the hw default values */
- if (adreno_is_a618(adreno_gpu))
+ if (adreno_is_a618(gpu))
return;
- if (adreno_is_a619_holi(adreno_gpu))
- hbb_lo = 0;
+ if (adreno_is_a619_holi(gpu))
+ gpu->ubwc_config.highest_bank_bit = 13;
- if (adreno_is_a640_family(adreno_gpu))
- amsbc = 1;
+ if (adreno_is_a640_family(gpu))
+ gpu->ubwc_config.amsbc = 1;
- if (adreno_is_a650(adreno_gpu) ||
- adreno_is_a660(adreno_gpu) ||
- adreno_is_a730(adreno_gpu) ||
- adreno_is_a740_family(adreno_gpu)) {
+ if (adreno_is_a650(gpu) ||
+ adreno_is_a660(gpu) ||
+ adreno_is_a690(gpu) ||
+ adreno_is_a730(gpu) ||
+ adreno_is_a740_family(gpu)) {
/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
- hbb_lo = 3;
- amsbc = 1;
- rgb565_predicator = 1;
- uavflagprd_inv = 2;
+ gpu->ubwc_config.highest_bank_bit = 16;
+ gpu->ubwc_config.amsbc = 1;
+ gpu->ubwc_config.rgb565_predicator = 1;
+ gpu->ubwc_config.uavflagprd_inv = 2;
}
- if (adreno_is_a690(adreno_gpu)) {
- hbb_lo = 2;
- amsbc = 1;
- rgb565_predicator = 1;
- uavflagprd_inv = 2;
+ if (adreno_is_7c3(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 14;
+ gpu->ubwc_config.amsbc = 1;
+ gpu->ubwc_config.rgb565_predicator = 1;
+ gpu->ubwc_config.uavflagprd_inv = 2;
}
+}
- if (adreno_is_7c3(adreno_gpu)) {
- hbb_lo = 1;
- amsbc = 1;
- rgb565_predicator = 1;
- uavflagprd_inv = 2;
- }
+static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ /*
+ * We subtract 13 from the highest bank bit (13 is the minimum value
+ * allowed by hw) and write the lowest two bits of the remaining value
+ * as hbb_lo and the one above it as hbb_hi to the hardware.
+ */
+	u32 hbb, hbb_hi, hbb_lo;
+
+	BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
+
+	hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
+	hbb_hi = hbb >> 2;
+	hbb_lo = hbb & 3;
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
- rgb565_predicator << 11 | hbb_hi << 10 | amsbc << 4 |
- min_acc_len << 3 | hbb_lo << 1 | ubwc_mode);
+ adreno_gpu->ubwc_config.rgb565_predicator << 11 |
+ hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 |
+ adreno_gpu->ubwc_config.min_acc_len << 3 |
+ hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 |
- min_acc_len << 3 | hbb_lo << 1 | ubwc_mode);
+ adreno_gpu->ubwc_config.min_acc_len << 3 |
+ hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 |
- uavflagprd_inv << 4 | min_acc_len << 3 |
- hbb_lo << 1 | ubwc_mode);
+ adreno_gpu->ubwc_config.uavflagprd_inv << 4 |
+ adreno_gpu->ubwc_config.min_acc_len << 3 |
+ hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
if (adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
FIELD_PREP(GENMASK(8, 5), hbb_lo));
- gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, min_acc_len << 23 | hbb_lo << 21);
+ gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
+ adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21);
}
static int a6xx_cp_init(struct msm_gpu *gpu)
@@ -1741,7 +1746,9 @@ static int hw_init(struct msm_gpu *gpu)
/* Setting the primFifo thresholds default values,
* and vccCacheSkipDis=1 bit (0x200) for A640 and newer
*/
- if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu) || adreno_is_a690(adreno_gpu))
+ if (adreno_is_a690(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00800200);
+ else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
@@ -1775,6 +1782,8 @@ static int hw_init(struct msm_gpu *gpu)
if (adreno_is_a730(adreno_gpu) ||
adreno_is_a740_family(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff);
+ else if (adreno_is_a690(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x4fffff);
else if (adreno_is_a619(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff);
else if (adreno_is_a610(adreno_gpu))
@@ -1782,7 +1791,7 @@ static int hw_init(struct msm_gpu *gpu)
else
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff);
- gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
+ gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, BIT(7) | 0x1);
/* Set weights for bicubic filtering */
if (adreno_is_a650_family(adreno_gpu)) {
@@ -1808,12 +1817,17 @@ static int hw_init(struct msm_gpu *gpu)
a6xx_set_cp_protect(gpu);
if (adreno_is_a660_family(adreno_gpu)) {
- gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
+ if (adreno_is_a690(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x00028801);
+ else
+ gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
}
+ if (adreno_is_a690(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x90);
/* Set dualQ + disable afull for A660 GPU */
- if (adreno_is_a660(adreno_gpu))
+ else if (adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
else if (adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG,
@@ -2908,5 +2922,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
a6xx_fault_handler);
+ a6xx_calc_ubwc_config(adreno_gpu);
+
return gpu;
}
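A standalone sketch of the hbb_lo/hbb_hi split performed in a6xx_set_ubwc_config(), using the a650/a660/a7xx default of highest_bank_bit = 16; it only mirrors the arithmetic, no hardware access involved:

	#include <stdio.h>

	int main(void)
	{
		unsigned int highest_bank_bit = 16;        /* driver default for a650/a660/a7xx */
		unsigned int hbb = highest_bank_bit - 13;  /* 13 is the hw minimum */
		unsigned int hbb_hi = hbb >> 2;            /* -> RB_NC_MODE_CNTL bit 10 */
		unsigned int hbb_lo = hbb & 3;             /* -> RB_NC_MODE_CNTL bits 2:1 */

		/* prints: hbb=3 hbb_hi=0 hbb_lo=3, matching the old hard-coded hbb_lo */
		printf("hbb=%u hbb_hi=%u hbb_lo=%u\n", hbb, hbb_hi, hbb_lo);
		return 0;
	}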
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index f62ab5257e66..2ce7d7b1690d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -464,7 +464,7 @@ static const struct adreno_info gpulist[] = {
{ 190, 1 },
),
}, {
- .chip_ids = ADRENO_CHIP_IDS(0x06080000),
+ .chip_ids = ADRENO_CHIP_IDS(0x06080001),
.family = ADRENO_6XX_GEN2,
.revn = 680,
.fw = {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 3fe9fd240cc7..074fb498706f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -373,6 +373,9 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
return -EINVAL;
*value = ctx->aspace->va_size;
return 0;
+ case MSM_PARAM_HIGHEST_BANK_BIT:
+ *value = adreno_gpu->ubwc_config.highest_bank_bit;
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
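A hedged userspace sketch of reading the new parameter through the existing MSM get-param ioctl; it assumes libdrm plus the msm uapi header, and get_highest_bank_bit() itself is hypothetical:

	#include <stdint.h>
	#include <xf86drm.h>
	#include <drm/msm_drm.h>	/* struct drm_msm_param, MSM_PARAM_*, MSM_PIPE_3D0 */

	static int get_highest_bank_bit(int fd, uint64_t *hbb)
	{
		struct drm_msm_param req = {
			.pipe = MSM_PIPE_3D0,
			.param = MSM_PARAM_HIGHEST_BANK_BIT,
		};
		int ret;

		ret = drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req));
		if (ret)
			return ret;

		*hbb = req.value;	/* e.g. 16 on the a650 family */
		return 0;
	}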
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 80b3f6312116..bc14df96feb0 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -165,6 +165,15 @@ struct adreno_gpu {
/* firmware: */
const struct firmware *fw[ADRENO_FW_MAX];
+ struct {
+ u32 rgb565_predicator;
+ u32 uavflagprd_inv;
+ u32 min_acc_len;
+ u32 ubwc_mode;
+ u32 highest_bank_bit;
+ u32 amsbc;
+ } ubwc_config;
+
/*
* Register offsets are different between some GPUs.
* GPU specific offsets will be exported by GPU specific
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
new file mode 100644
index 000000000000..eb5dfff2ec4f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -0,0 +1,457 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_10_0_SM8650_H
+#define _DPU_10_0_SM8650_H
+
+static const struct dpu_caps sm8650_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 8192,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sm8650_mdp = {
+ .name = "top_0",
+ .base = 0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
+static const struct dpu_ctl_cfg sm8650_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x1000,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x1000,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x1000,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x1000,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x1000,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x1000,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm8650_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ },
+};
+
+static const struct dpu_lm_cfg sm8650_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sm8650_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm8650_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_6,
+ .base = 0x66000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_7,
+ .base = 0x66400, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_8", .id = PINGPONG_8,
+ .base = 0x7e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_4,
+ }, {
+ .name = "pingpong_9", .id = PINGPONG_9,
+ .base = 0x7e400, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_4,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sm8650_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x66700, .len = 0x8,
+ }, {
+ .name = "merge_3d_4", .id = MERGE_3D_4,
+ .base = 0x7e700, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains dual hard-slice
+ * DSC encoders, so both encoders share the same base address but have
+ * their own sub-block addresses.
+ */
+static const struct dpu_dsc_cfg sm8650_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_2_0", .id = DSC_4,
+ .base = 0x82000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_2_1", .id = DSC_5,
+ .base = 0x82000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg sm8650_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_intf_cfg sm8650_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
+};
+
+static const struct dpu_perf_cfg sm8650_perf_data = {
+ .max_bw_low = 17000000,
+ .max_bw_high = 27000000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm8650_mdss_ver = {
+ .core_major_ver = 10,
+ .core_minor_ver = 0,
+};
+
+const struct dpu_mdss_cfg dpu_sm8650_cfg = {
+ .mdss_ver = &sm8650_mdss_ver,
+ .caps = &sm8650_dpu_caps,
+ .mdp = &sm8650_mdp,
+ .ctl_count = ARRAY_SIZE(sm8650_ctl),
+ .ctl = sm8650_ctl,
+ .sspp_count = ARRAY_SIZE(sm8650_sspp),
+ .sspp = sm8650_sspp,
+ .mixer_count = ARRAY_SIZE(sm8650_lm),
+ .mixer = sm8650_lm,
+ .dspp_count = ARRAY_SIZE(sm8650_dspp),
+ .dspp = sm8650_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8650_pp),
+ .pingpong = sm8650_pp,
+ .dsc_count = ARRAY_SIZE(sm8650_dsc),
+ .dsc = sm8650_dsc,
+ .merge_3d_count = ARRAY_SIZE(sm8650_merge_3d),
+ .merge_3d = sm8650_merge_3d,
+ .wb_count = ARRAY_SIZE(sm8650_wb),
+ .wb = sm8650_wb,
+ .intf_count = ARRAY_SIZE(sm8650_intf),
+ .intf = sm8650_intf,
+ .vbif_count = ARRAY_SIZE(sm8650_vbif),
+ .vbif = sm8650_vbif,
+ .perf = &sm8650_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index aa1867943c9f..1d3e9666c741 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -10,7 +10,6 @@
static const struct dpu_caps msm8998_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x7,
- .qseed_type = DPU_SSPP_SCALER_QSEED3,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -70,7 +69,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1ac,
.features = VIG_MSM8998_MASK,
- .sblk = &msm8998_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -78,7 +77,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1ac,
.features = VIG_MSM8998_MASK,
- .sblk = &msm8998_vig_sblk_1,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
@@ -86,7 +85,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1ac,
.features = VIG_MSM8998_MASK,
- .sblk = &msm8998_vig_sblk_2,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
@@ -94,7 +93,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1ac,
.features = VIG_MSM8998_MASK,
- .sblk = &msm8998_vig_sblk_3,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -102,7 +101,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1ac,
.features = DMA_MSM8998_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -110,7 +109,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1ac,
.features = DMA_MSM8998_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -118,7 +117,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1ac,
.features = DMA_CURSOR_MSM8998_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -126,7 +125,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1ac,
.features = DMA_CURSOR_MSM8998_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index 38ac0c1a134b..7a23389a5732 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sdm845_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -68,7 +67,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1c8,
.features = VIG_SDM845_MASK_SDMA,
- .sblk = &sdm845_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -76,7 +75,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1c8,
.features = VIG_SDM845_MASK_SDMA,
- .sblk = &sdm845_vig_sblk_1,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
@@ -84,7 +83,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1c8,
.features = VIG_SDM845_MASK_SDMA,
- .sblk = &sdm845_vig_sblk_2,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
@@ -92,7 +91,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1c8,
.features = VIG_SDM845_MASK_SDMA,
- .sblk = &sdm845_vig_sblk_3,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -100,7 +99,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1c8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1c8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1c8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1c8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
new file mode 100644
index 000000000000..cbbdaebe357e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Richard Acayan. All rights reserved.
+ */
+
+#ifndef _DPU_4_1_SDM670_H
+#define _DPU_4_1_SDM670_H
+
+static const struct dpu_mdp_cfg sdm670_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x45c,
+ .features = BIT(DPU_MDP_AUDIO_SELECT),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ },
+};
+
+static const struct dpu_sspp_cfg sdm670_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1c8,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1c8,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+		.clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1c8,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1c8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1c8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ },
+};
+
+static const struct dpu_dsc_cfg sdm670_dsc[] = {
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ },
+};
+
+static const struct dpu_mdss_version sdm670_mdss_ver = {
+ .core_major_ver = 4,
+ .core_minor_ver = 1,
+};
+
+const struct dpu_mdss_cfg dpu_sdm670_cfg = {
+ .mdss_ver = &sdm670_mdss_ver,
+ .caps = &sdm845_dpu_caps,
+ .mdp = &sdm670_mdp,
+ .ctl_count = ARRAY_SIZE(sdm845_ctl),
+ .ctl = sdm845_ctl,
+ .sspp_count = ARRAY_SIZE(sdm670_sspp),
+ .sspp = sdm670_sspp,
+ .mixer_count = ARRAY_SIZE(sdm845_lm),
+ .mixer = sdm845_lm,
+ .pingpong_count = ARRAY_SIZE(sdm845_pp),
+ .pingpong = sdm845_pp,
+ .dsc_count = ARRAY_SIZE(sdm670_dsc),
+ .dsc = sdm670_dsc,
+ .intf_count = ARRAY_SIZE(sdm845_intf),
+ .intf = sdm845_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sdm845_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 9392ad2b4d3f..145f3d5953a3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sm8150_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -77,7 +76,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -85,7 +84,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_1,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
@@ -93,7 +92,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_2,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
@@ -101,7 +100,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_3,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -109,7 +108,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -117,7 +116,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -125,7 +124,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -133,7 +132,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index e07f4c8c25b9..9e3bec8bc121 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sc8180x_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -76,7 +75,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -84,7 +83,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_1,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
@@ -92,7 +91,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_2,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
@@ -100,7 +99,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_3,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -132,7 +131,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
@@ -367,6 +366,7 @@ static const struct dpu_perf_cfg sc8180x_perf_data = {
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc7180_qos_linear),
.entries = sc7180_qos_linear
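The safe_lut_tbl entry added above complements the existing danger_lut_tbl: both are indexed by QoS usage class, and the perf code picks one entry per plane format. A hedged sketch of the lookup, with names assumed from the dpu_core_perf side:

/* Assumed shapes; the real definitions live in dpu_hw_catalog.h. */
enum dpu_qos_lut_usage {
	DPU_QOS_LUT_USAGE_LINEAR = 0,
	DPU_QOS_LUT_USAGE_MACROTILE,
	DPU_QOS_LUT_USAGE_NRT,
	DPU_QOS_LUT_USAGE_MAX,
};

static u32 pick_safe_lut(const struct dpu_perf_cfg *perf,
			 enum dpu_qos_lut_usage usage)
{
	/* e.g. returns 0xfff0 for linear planes with the table above */
	return perf->safe_lut_tbl[usage];
}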
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index cec7af6667dc..76b2ec0d2489 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -68,8 +68,8 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SM6125_MASK,
- .sblk = &sm6125_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -77,7 +77,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -85,7 +85,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index 94278a3e3483..a57d50b1f028 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sm8250_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -32,7 +31,7 @@ static const struct dpu_mdp_cfg sm8250_mdp = {
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
- [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
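The WB2 line above fixes the clock-control location, moving it from 0x3b8 bit 24 to 0x2bc bit 16 for this SoC. Each dpu_clk_ctrl_reg simply names where the force-clock-on bit for a client lives; assumed shape and an illustrative consumer:

/* Assumed from dpu_hw_catalog.h: offset/bit of the clock-on control. */
struct dpu_clk_ctrl_reg {
	u32 reg_off;
	u32 bit_off;
};

/* Illustrative only: force the client clock on before programming it. */
static void dpu_force_clk_on(void __iomem *mdp_base,
			     const struct dpu_clk_ctrl_reg *r)
{
	u32 val = readl_relaxed(mdp_base + r->reg_off);

	writel_relaxed(val | BIT(r->bit_off), mdp_base + r->reg_off);
}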
@@ -75,32 +74,32 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8250_vig_sblk_0,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f8,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8250_vig_sblk_1,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f8,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8250_vig_sblk_2,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f8,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8250_vig_sblk_3,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -132,7 +131,7 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
@@ -337,8 +336,8 @@ static const struct dpu_wb_cfg sm8250_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats,
- .num_formats = ARRAY_SIZE(wb2_formats),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -385,6 +384,7 @@ const struct dpu_mdss_cfg dpu_sm8250_cfg = {
.mdss_ver = &sm8250_mdss_ver,
.caps = &sm8250_dpu_caps,
.mdp = &sm8250_mdp,
+ .cdm = &sc7280_cdm,
.ctl_count = ARRAY_SIZE(sm8250_ctl),
.ctl = sm8250_ctl,
.sspp_count = ARRAY_SIZE(sm8250_sspp),
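dpu_sm8250_cfg now references the sc7280 CDM instance (shared across catalogs despite its name); CDM, the chroma-down block, converts RGB output to subsampled YUV ahead of writeback, which is what lets this catalog's WB2 advertise the rgb_yuv format list above. Assumed definition of the shared instance, values approximate since it lives in dpu_hw_catalog.c rather than in this hunk:

/* Assumed shared CDM instance referenced by several catalogs. */
static const struct dpu_cdm_cfg sc7280_cdm = {
	.name = "cdm_0",
	.id = CDM_0,
	.base = 0x79200,
	.len = 0x228,
};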
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index c0d88ddccb28..7382ebb6e5b2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sc7180_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x9,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
@@ -25,7 +24,7 @@ static const struct dpu_mdp_cfg sc7180_mdp = {
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
- [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -52,8 +51,8 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sc7180_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -61,7 +60,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -69,7 +68,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -77,7 +76,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -158,8 +157,8 @@ static const struct dpu_wb_cfg sc7180_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats,
- .num_formats = ARRAY_SIZE(wb2_formats),
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
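wb2_formats splits into an RGB-only and an RGB+YUV list: catalogs whose WB path has a CDM (sm8250 and sc7280 in this series) take wb2_formats_rgb_yuv, while the rest, like sc7180 here, stay RGB-only. Illustrative shape of the two lists; the real ones in dpu_hw_catalog.c are longer:

/* Sketch only: DRM fourcc lists consumed by the WB connector. */
static const u32 wb2_formats_rgb[] = {
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
};

static const u32 wb2_formats_rgb_yuv[] = {
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_NV12,	/* reachable only via the CDM path */
};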
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
index 57ce14c18def..43f64a005f5a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sm6115_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
.max_mixer_blendstages = 0x4,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2160,
@@ -39,8 +38,8 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm6115_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -48,7 +47,7 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index 62db84bd15f2..e17a30be7525 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -11,7 +11,6 @@
static const struct dpu_caps sm6350_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x7,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -59,8 +58,8 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sc7180_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -68,7 +67,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -76,7 +75,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -84,7 +83,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
index fb36fba5171c..3cbb2fe8aba2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
@@ -39,7 +39,7 @@ static const struct dpu_sspp_cfg qcm2290_sspp[] = {
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
.features = VIG_QCM2290_MASK,
- .sblk = &qcm2290_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_noscale,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -47,7 +47,7 @@ static const struct dpu_sspp_cfg qcm2290_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &qcm2290_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
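qcm2290's VIG moves to dpu_vig_sblk_noscale, the variant for pipes with no QSEED scaler at all: scaling limits pin to unity and no scaler sub-block is described. Illustrative definition, field values assumed:

/* Sketch: the noscale variant simply omits the scaler sub-block. */
static const struct dpu_sspp_sub_blks dpu_vig_sblk_noscale = {
	.maxdwnscale = SSPP_UNITY_SCALE,
	.maxupscale = SSPP_UNITY_SCALE,
	.format_list = plane_formats_yuv,
	.num_formats = ARRAY_SIZE(plane_formats_yuv),
	.virt_format_list = plane_formats,
	.virt_num_formats = ARRAY_SIZE(plane_formats),
};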
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
index 5a3aad364c78..a06c8634d2d7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
@@ -11,7 +11,6 @@
static const struct dpu_caps sm6375_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
.max_mixer_blendstages = 0x4,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2160,
@@ -40,8 +39,8 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm6115_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -49,7 +48,7 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index 1709ba57f384..aced16e350da 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sm8350_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -31,6 +30,7 @@ static const struct dpu_mdp_cfg sm8350_mdp = {
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
@@ -74,64 +74,64 @@ static const struct dpu_sspp_cfg sm8350_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_0,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_1,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_2,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_3,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
@@ -298,6 +298,21 @@ static const struct dpu_dsc_cfg sm8350_dsc[] = {
},
};
+static const struct dpu_wb_cfg sm8350_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm8350_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -393,6 +408,8 @@ const struct dpu_mdss_cfg dpu_sm8350_cfg = {
.dsc = sm8350_dsc,
.merge_3d_count = ARRAY_SIZE(sm8350_merge_3d),
.merge_3d = sm8350_merge_3d,
+ .wb_count = ARRAY_SIZE(sm8350_wb),
+ .wb = sm8350_wb,
.intf_count = ARRAY_SIZE(sm8350_intf),
.intf = sm8350_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
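Two things land for sm8350 here: the SSPP feature masks gain the _SDMA suffix, and a WB2 instance plus its clock control appear. The suffixed masks just fold the SmartDMA v2 capability bit into the base masks; assumed composition, matching the convention used elsewhere in dpu_hw_catalog.c:

#define VIG_SDM845_MASK_SDMA \
	(VIG_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_SDM845_MASK_SDMA \
	(DMA_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_CURSOR_SDM845_MASK_SDMA \
	(DMA_CURSOR_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))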
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
index 15942fa5a8e0..2f153e0b5c6a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sc7280_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x7,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2400,
@@ -25,7 +24,7 @@ static const struct dpu_mdp_cfg sc7280_mdp = {
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
- [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -58,7 +57,7 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
.features = VIG_SC7280_MASK_SDMA,
- .sblk = &sc7280_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_3_0_rot_v2,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -66,7 +65,7 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -74,7 +73,7 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -82,7 +81,7 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -170,8 +169,8 @@ static const struct dpu_wb_cfg sc7280_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats,
- .num_formats = ARRAY_SIZE(wb2_formats),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -249,6 +248,7 @@ const struct dpu_mdss_cfg dpu_sc7280_cfg = {
.mdss_ver = &sc7280_mdss_ver,
.caps = &sc7280_dpu_caps,
.mdp = &sc7280_mdp,
+ .cdm = &sc7280_cdm,
.ctl_count = ARRAY_SIZE(sc7280_ctl),
.ctl = sc7280_ctl,
.sspp_count = ARRAY_SIZE(sc7280_sspp),
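sc7280's VIG keeps a catalog-specific flavour, dpu_vig_sblk_qseed3_3_0_rot_v2, because this SSPP supports inline rotation on top of the QSEED3 3.0 scaler. Hedged sketch of the extra piece, names and limits assumed:

/* Illustrative: the _rot_v2 variant hangs a rotation config off the
 * shared sub-block, where the plain qseed3_3_0 one leaves it NULL.
 */
static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
	.rot_maxheight = 1088,
	.rot_num_formats = ARRAY_SIZE(rotation_v2_formats),
	.rot_format_list = rotation_v2_formats,
};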
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index 4c0528794e7a..0d143e390eca 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sc8280xp_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 11,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -75,32 +74,32 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x2ac,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x2ac,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_1,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x2ac,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_2,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x2ac,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_3,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x2ac,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x2ac,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x2ac,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -132,7 +131,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x2ac,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 7742f52be859..a1779c5597ae 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sm8450_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -32,6 +31,7 @@ static const struct dpu_mdp_cfg sm8450_mdp = {
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
@@ -75,64 +75,64 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x32c,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_0,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x32c,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_1,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x32c,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_2,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x32c,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_3,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x32c,
- .features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x32c,
- .features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x32c,
- .features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x32c,
- .features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
@@ -316,6 +316,21 @@ static const struct dpu_dsc_cfg sm8450_dsc[] = {
},
};
+static const struct dpu_wb_cfg sm8450_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm8450_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -411,6 +426,8 @@ const struct dpu_mdss_cfg dpu_sm8450_cfg = {
.dsc = sm8450_dsc,
.merge_3d_count = ARRAY_SIZE(sm8450_merge_3d),
.merge_3d = sm8450_merge_3d,
+ .wb_count = ARRAY_SIZE(sm8450_wb),
+ .wb = sm8450_wb,
.intf_count = ARRAY_SIZE(sm8450_intf),
.intf = sm8450_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
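With sm8450_wb registered (and sm8350's twin above), the KMS init path can create a writeback connector from the catalog entry. Rough shape of the assumed consumer; the exact init signature has shifted between kernel versions, so treat this as a sketch:

/* Sketch of the assumed consumer in dpu_kms.c */
for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
	const struct dpu_wb_cfg *wb = &dpu_kms->catalog->wb[i];

	if (wb->id != WB_2)	/* only WB_2 is exposed as a connector */
		continue;

	rc = dpu_writeback_init(dev, encoder,
				wb->format_list, wb->num_formats);
	if (rc)
		return rc;
}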
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 69b80af6566a..ad48defa154f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sm8550_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -67,71 +66,71 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x344,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8550_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x344,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8550_vig_sblk_1,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x344,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8550_vig_sblk_2,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x344,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8550_vig_sblk_3,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x344,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x344,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x344,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x344,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_12", .id = SSPP_DMA4,
.base = 0x2c000, .len = 0x344,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sm8550_dma_sblk_4,
+ .sblk = &dpu_dma_sblk,
.xin_id = 14,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_13", .id = SSPP_DMA5,
.base = 0x2e000, .len = 0x344,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sm8550_dma_sblk_5,
+ .sblk = &dpu_dma_sblk,
.xin_id = 15,
.type = SSPP_TYPE_DMA,
},
@@ -316,8 +315,8 @@ static const struct dpu_wb_cfg sm8550_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats,
- .num_formats = ARRAY_SIZE(wb2_formats),
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 3c475f8042b0..88c2e51ab166 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@@ -51,17 +51,6 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
return to_dpu_kms(priv->kms);
}
-static void dpu_crtc_destroy(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-
- if (!crtc)
- return;
-
- drm_crtc_cleanup(crtc);
- kfree(dpu_crtc);
-}
-
static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -125,7 +114,7 @@ static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state)
continue;
/* Calculate MISR over 1 frame */
- m->hw_lm->ops.setup_misr(m->hw_lm, true, 1);
+ m->hw_lm->ops.setup_misr(m->hw_lm);
}
}
@@ -1435,7 +1424,6 @@ static int dpu_crtc_late_register(struct drm_crtc *crtc)
static const struct drm_crtc_funcs dpu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = dpu_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = dpu_crtc_reset,
.atomic_duplicate_state = dpu_crtc_duplicate_state,
@@ -1469,9 +1457,13 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
struct dpu_crtc *dpu_crtc;
int i, ret;
- dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
- if (!dpu_crtc)
- return ERR_PTR(-ENOMEM);
+ dpu_crtc = drmm_crtc_alloc_with_planes(dev, struct dpu_crtc, base,
+ plane, cursor,
+ &dpu_crtc_funcs,
+ NULL);
+
+ if (IS_ERR(dpu_crtc))
+ return ERR_CAST(dpu_crtc);
crtc = &dpu_crtc->base;
crtc->dev = dev;
@@ -1491,9 +1483,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
dpu_crtc_frame_event_work);
}
- drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
- NULL);
-
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
if (dpu_kms->catalog->dspp_count)
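dpu_crtc_init switching to drmm_crtc_alloc_with_planes removes the manual .destroy path: both the allocation and drm_crtc_cleanup() are now tied to the drm_device lifetime, which is why dpu_crtc_destroy could be deleted wholesale. The same pattern in miniature, as a generic usage sketch (my_crtc and my_crtc_funcs are placeholders, not driver names):

/* Generic drmm usage mirroring the conversion above. */
struct my_crtc {
	struct drm_crtc base;
	/* driver-private state ... */
};

static struct drm_crtc *my_crtc_create(struct drm_device *dev,
				       struct drm_plane *primary,
				       struct drm_plane *cursor)
{
	struct my_crtc *c;

	c = drmm_crtc_alloc_with_planes(dev, struct my_crtc, base,
					primary, cursor,
					&my_crtc_funcs, NULL);
	if (IS_ERR(c))
		return ERR_CAST(c);

	return &c->base;	/* freed automatically with the device */
}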
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 1cf7ff6caff4..83380bc92a00 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013 Red Hat
* Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -16,6 +16,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_framebuffer.h>
#include "msm_drv.h"
#include "dpu_kms.h"
@@ -26,6 +27,7 @@
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
+#include "dpu_hw_cdm.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
@@ -39,6 +41,9 @@
#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+#define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
/*
* Two to anticipate panels that can do cmd/vid dynamic switching
* plan is to create all possible physical encoder types, and switch between
@@ -151,6 +156,8 @@ enum dpu_enc_rc_states {
* @crtc_frame_event_cb: callback handler for frame event
* @crtc_frame_event_cb_data: callback handler private data
* @frame_done_timeout_ms: frame done timeout in ms
+ * @frame_done_timeout_cnt: atomic counter tracking the number of frame
+ * done timeouts
* @frame_done_timer: watchdog timer for frame done event
* @disp_info: local copy of msm_display_info struct
* @idle_pc_supported: indicates whether idle power collapse is supported
@@ -184,13 +191,13 @@ struct dpu_encoder_virt {
struct drm_crtc *crtc;
struct drm_connector *connector;
- struct dentry *debugfs_root;
struct mutex enc_lock;
DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
void (*crtc_frame_event_cb)(void *, u32 event);
void *crtc_frame_event_cb_data;
atomic_t frame_done_timeout_ms;
+ atomic_t frame_done_timeout_cnt;
struct timer_list frame_done_timer;
struct msm_display_info disp_info;
@@ -255,7 +262,7 @@ void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
continue;
- phys->hw_intf->ops.setup_misr(phys->hw_intf, true, 1);
+ phys->hw_intf->ops.setup_misr(phys->hw_intf);
}
}
@@ -439,41 +446,6 @@ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
return linecount;
}
-static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
-{
- struct dpu_encoder_virt *dpu_enc = NULL;
- int i = 0;
-
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
- dpu_enc = to_dpu_encoder_virt(drm_enc);
- DPU_DEBUG_ENC(dpu_enc, "\n");
-
- mutex_lock(&dpu_enc->enc_lock);
-
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (phys->ops.destroy) {
- phys->ops.destroy(phys);
- --dpu_enc->num_phys_encs;
- dpu_enc->phys_encs[i] = NULL;
- }
- }
-
- if (dpu_enc->num_phys_encs)
- DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
- dpu_enc->num_phys_encs);
- dpu_enc->num_phys_encs = 0;
- mutex_unlock(&dpu_enc->enc_lock);
-
- drm_encoder_cleanup(drm_enc);
- mutex_destroy(&dpu_enc->enc_lock);
-}
-
void dpu_encoder_helper_split_config(
struct dpu_encoder_phys *phys_enc,
enum dpu_intf interface)
@@ -614,6 +586,7 @@ static int dpu_encoder_virt_atomic_check(
struct drm_display_mode *adj_mode;
struct msm_display_topology topology;
struct dpu_global_state *global_state;
+ struct drm_framebuffer *fb;
struct drm_dsc_config *dsc;
int i = 0;
int ret = 0;
@@ -655,6 +628,22 @@ static int dpu_encoder_virt_atomic_check(
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
/*
+ * Use CDM only for writeback at the moment as other interfaces cannot handle it.
+ * If writeback itself cannot handle CDM for some reason, it will fail in its atomic_check()
+ * earlier.
+ */
+ if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+ fb = conn_state->writeback_job->fb;
+
+ if (fb && DPU_FORMAT_IS_YUV(to_dpu_format(msm_framebuffer_format(fb))))
+ topology.needs_cdm = true;
+ if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
+ crtc_state->mode_changed = true;
+ else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
+ crtc_state->mode_changed = true;
+ }
+
+ /*
* Release and Allocate resources on every modeset
* Don't allocate when active is false.
*/
@@ -1094,6 +1083,15 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
dpu_enc->dsc_mask = dsc_mask;
+ if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+ struct dpu_hw_blk *hw_cdm = NULL;
+
+ dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_CDM,
+ &hw_cdm, 1);
+ dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
+ }
+
cstate = to_dpu_crtc_state(crtc_state);
for (i = 0; i < num_lm; i++) {
@@ -1204,6 +1202,8 @@ static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);
+ atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);
+
if (disp_info->intf_type == INTF_DP)
dpu_enc->wide_bus_en = msm_dp_wide_bus_available(priv->dp[index]);
else if (disp_info->intf_type == INTF_DSI)
@@ -2080,6 +2080,15 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
phys_enc->hw_pp->merge_3d->idx);
}
+ if (phys_enc->hw_cdm) {
+ if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp)
+ phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
+ PINGPONG_NONE);
+ if (phys_enc->hw_ctl->ops.update_pending_flush_cdm)
+ phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl,
+ phys_enc->hw_cdm->idx);
+ }
+
if (dpu_enc->dsc) {
dpu_encoder_unprep_dsc(dpu_enc);
dpu_enc->dsc = NULL;
@@ -2108,18 +2117,20 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
- struct dpu_encoder_virt *dpu_enc = s->private;
+ struct drm_encoder *drm_enc = s->private;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
int i;
mutex_lock(&dpu_enc->enc_lock);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ",
+ seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d frame_done_cnt:%d",
phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1,
phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1,
atomic_read(&phys->vsync_cnt),
- atomic_read(&phys->underrun_cnt));
+ atomic_read(&phys->underrun_cnt),
+ atomic_read(&dpu_enc->frame_done_timeout_cnt));
seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
}
@@ -2130,49 +2141,18 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data)
DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
-static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+static void dpu_encoder_debugfs_init(struct drm_encoder *drm_enc, struct dentry *root)
{
- struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
-
- char name[12];
-
- if (!drm_enc->dev) {
- DPU_ERROR("invalid encoder or kms\n");
- return -EINVAL;
- }
-
- snprintf(name, sizeof(name), "encoder%u", drm_enc->base.id);
-
- /* create overall sub-directory for the encoder */
- dpu_enc->debugfs_root = debugfs_create_dir(name,
- drm_enc->dev->primary->debugfs_root);
-
/* don't error check these */
debugfs_create_file("status", 0600,
- dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
-
- return 0;
+ root, drm_enc, &_dpu_encoder_status_fops);
}
#else
-static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
-{
- return 0;
-}
+#define dpu_encoder_debugfs_init NULL
#endif
-static int dpu_encoder_late_register(struct drm_encoder *encoder)
-{
- return _dpu_encoder_init_debugfs(encoder);
-}
-
-static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
-{
- struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
-
- debugfs_remove_recursive(dpu_enc->debugfs_root);
-}
-
static int dpu_encoder_virt_add_phys_encs(
+ struct drm_device *dev,
struct msm_display_info *disp_info,
struct dpu_encoder_virt *dpu_enc,
struct dpu_enc_phys_init_params *params)
@@ -2194,7 +2174,7 @@ static int dpu_encoder_virt_add_phys_encs(
if (disp_info->intf_type == INTF_WB) {
- enc = dpu_encoder_phys_wb_init(params);
+ enc = dpu_encoder_phys_wb_init(dev, params);
if (IS_ERR(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
@@ -2205,7 +2185,7 @@ static int dpu_encoder_virt_add_phys_encs(
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
++dpu_enc->num_phys_encs;
} else if (disp_info->is_cmd_mode) {
- enc = dpu_encoder_phys_cmd_init(params);
+ enc = dpu_encoder_phys_cmd_init(dev, params);
if (IS_ERR(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
@@ -2216,7 +2196,7 @@ static int dpu_encoder_virt_add_phys_encs(
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
++dpu_enc->num_phys_encs;
} else {
- enc = dpu_encoder_phys_vid_init(params);
+ enc = dpu_encoder_phys_vid_init(dev, params);
if (IS_ERR(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
@@ -2305,7 +2285,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
break;
}
- ret = dpu_encoder_virt_add_phys_encs(disp_info,
+ ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info,
dpu_enc, &phys_params);
if (ret) {
DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
@@ -2339,7 +2319,10 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
return;
}
- DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
+ DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n");
+
+ if (atomic_inc_return(&dpu_enc->frame_done_timeout_cnt) == 1)
+ msm_disp_snapshot_state(drm_enc->dev);
event = DPU_ENCODER_FRAME_EVENT_ERROR;
trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
@@ -2354,9 +2337,7 @@ static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
};
static const struct drm_encoder_funcs dpu_encoder_funcs = {
- .destroy = dpu_encoder_destroy,
- .late_register = dpu_encoder_late_register,
- .early_unregister = dpu_encoder_early_unregister,
+ .debugfs_init = dpu_encoder_debugfs_init,
};
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
@@ -2365,20 +2346,13 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
{
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
- struct drm_encoder *drm_enc = NULL;
- struct dpu_encoder_virt *dpu_enc = NULL;
- int ret = 0;
-
- dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
- if (!dpu_enc)
- return ERR_PTR(-ENOMEM);
+ struct dpu_encoder_virt *dpu_enc;
+ int ret;
- ret = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
- drm_enc_mode, NULL);
- if (ret) {
- devm_kfree(dev->dev, dpu_enc);
- return ERR_PTR(ret);
- }
+ dpu_enc = drmm_encoder_alloc(dev, struct dpu_encoder_virt, base,
+ &dpu_encoder_funcs, drm_enc_mode, NULL);
+ if (IS_ERR(dpu_enc))
+ return ERR_CAST(dpu_enc);
drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
@@ -2388,10 +2362,13 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
mutex_init(&dpu_enc->rc_lock);
ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
- if (ret)
- goto fail;
+ if (ret) {
+ DPU_ERROR("failed to setup encoder\n");
+ return ERR_PTR(-ENOMEM);
+ }
atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
+ atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
@@ -2404,13 +2381,6 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
DPU_DEBUG_ENC(dpu_enc, "created\n");
return &dpu_enc->base;
-
-fail:
- DPU_ERROR("failed to create encoder\n");
- if (drm_enc)
- dpu_encoder_destroy(drm_enc);
-
- return ERR_PTR(ret);
}
int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
@@ -2437,9 +2407,6 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
case MSM_ENC_TX_COMPLETE:
fn_wait = phys->ops.wait_for_tx_complete;
break;
- case MSM_ENC_VBLANK:
- fn_wait = phys->ops.wait_for_vblank;
- break;
default:
DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
event);
@@ -2497,7 +2464,6 @@ void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
phys_enc->enc_spinlock = p->enc_spinlock;
phys_enc->enable_state = DPU_ENC_DISABLED;
- atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
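One detail in the atomic_check addition is easy to misread: the two mode_changed branches test the same condition from opposite sides, so they reduce to a single comparison. Condensed, equivalent form:

/* Equivalent to the if/else-if pair added in atomic_check above. */
if (topology.needs_cdm != !!dpu_enc->cur_master->hw_cdm)
	crtc_state->mode_changed = true;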
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 6f04c3d56e77..993f26343331 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -14,8 +14,10 @@
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_pingpong.h"
+#include "dpu_hw_cdm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_top.h"
+#include "dpu_hw_util.h"
#include "dpu_encoder.h"
#include "dpu_crtc.h"
@@ -72,7 +74,6 @@ struct dpu_encoder_phys;
* @enable: DRM Call. Enable a DRM mode.
* @disable: DRM Call. Disable mode.
* @atomic_check: DRM Call. Atomic check new DRM state.
- * @destroy: DRM Call. Destroy and release resources.
* @control_vblank_irq Register/Deregister for VBLANK IRQ
* @wait_for_commit_done: Wait for hardware to have flushed the
* current pending frames to hardware
@@ -102,11 +103,9 @@ struct dpu_encoder_phys_ops {
int (*atomic_check)(struct dpu_encoder_phys *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
- void (*destroy)(struct dpu_encoder_phys *encoder);
int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
- int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
@@ -153,8 +152,10 @@ enum dpu_intr_idx {
* @hw_pp: Hardware interface to the ping pong registers
* @hw_intf: Hardware interface to the intf registers
* @hw_wb: Hardware interface to the wb registers
+ * @hw_cdm: Hardware interface to the CDM registers
* @dpu_kms: Pointer to the dpu_kms top level
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
+ * @vblank_ctl_lock: mutex protecting vblank_refcount
* @enabled: Whether the encoder has enabled and running a mode
* @split_role: Role to play in a split-panel configuration
* @intf_mode: Interface mode
@@ -181,13 +182,15 @@ struct dpu_encoder_phys {
struct dpu_hw_pingpong *hw_pp;
struct dpu_hw_intf *hw_intf;
struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_cdm *hw_cdm;
struct dpu_kms *dpu_kms;
struct drm_display_mode cached_mode;
+ struct mutex vblank_ctl_lock;
enum dpu_enc_split_role split_role;
enum dpu_intf_mode intf_mode;
spinlock_t *enc_spinlock;
enum dpu_enc_enable_state enable_state;
- atomic_t vblank_refcount;
+ int vblank_refcount;
atomic_t vsync_cnt;
atomic_t underrun_cnt;
atomic_t pending_ctlstart_cnt;
@@ -210,6 +213,7 @@ static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
* @wbirq_refcount: Reference count of writeback interrupt
* @wb_done_timeout_cnt: number of wb done irq timeout errors
* @wb_cfg: writeback block config to store fb related details
+ * @cdm_cfg: cached CDM block configuration for the writeback block
* @wb_conn: backpointer to writeback connector
* @wb_job: backpointer to current writeback job
* @dest: dpu buffer layout for current writeback output buffer
@@ -219,6 +223,7 @@ struct dpu_encoder_phys_wb {
atomic_t wbirq_refcount;
int wb_done_timeout_cnt;
struct dpu_hw_wb_cfg wb_cfg;
+ struct dpu_hw_cdm_cfg cdm_cfg;
struct drm_writeback_connector *wb_conn;
struct drm_writeback_job *wb_job;
struct dpu_hw_fmt_layout dest;
@@ -281,22 +286,24 @@ struct dpu_encoder_wait_info {
* @p: Pointer to init params structure
* Return: Error code or newly allocated encoder
*/
-struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
/**
* dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @dev: Corresponding device for devres management
* @p: Pointer to init params structure
* Return: Error code or newly allocated encoder
*/
-struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
/**
* dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @dev: Corresponding device for devres management
* @init: Pointer to init info structure with initialization params
*/
-struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index be185fe69793..a301e2833177 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -13,6 +13,8 @@
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"
+#include <drm/drm_managed.h>
+
#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->base.parent ? \
(e)->base.parent->base.id : -1, \
@@ -244,7 +246,8 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
return -EINVAL;
}
- refcount = atomic_read(&phys_enc->vblank_refcount);
+ mutex_lock(&phys_enc->vblank_ctl_lock);
+ refcount = phys_enc->vblank_refcount;
/* Slave encoders don't report vblank */
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
@@ -260,16 +263,24 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
phys_enc->hw_pp->idx - PINGPONG_0,
enable ? "true" : "false", refcount);
- if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
- ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_RDPTR],
- dpu_encoder_phys_cmd_te_rd_ptr_irq,
- phys_enc);
- else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
- ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_RDPTR]);
+ if (enable) {
+ if (phys_enc->vblank_refcount == 0)
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_RDPTR],
+ dpu_encoder_phys_cmd_te_rd_ptr_irq,
+ phys_enc);
+ if (!ret)
+ phys_enc->vblank_refcount++;
+ } else if (!enable) {
+ if (phys_enc->vblank_refcount == 1)
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_RDPTR]);
+ if (!ret)
+ phys_enc->vblank_refcount--;
+ }
end:
+ mutex_unlock(&phys_enc->vblank_ctl_lock);
if (ret) {
DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
DRMID(phys_enc->parent),
@@ -285,7 +296,7 @@ static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
{
trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
- enable, atomic_read(&phys_enc->vblank_refcount));
+ enable, phys_enc->vblank_refcount);
if (enable) {
dpu_core_irq_register_callback(phys_enc->dpu_kms,
@@ -558,14 +569,6 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
phys_enc->enable_state = DPU_ENC_DISABLED;
}
-static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
-{
- struct dpu_encoder_phys_cmd *cmd_enc =
- to_dpu_encoder_phys_cmd(phys_enc);
-
- kfree(cmd_enc);
-}
-
static void dpu_encoder_phys_cmd_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc)
{
@@ -681,33 +684,6 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done(
return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}
-static int dpu_encoder_phys_cmd_wait_for_vblank(
- struct dpu_encoder_phys *phys_enc)
-{
- int rc = 0;
- struct dpu_encoder_phys_cmd *cmd_enc;
- struct dpu_encoder_wait_info wait_info;
-
- cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
-
- /* only required for master controller */
- if (!dpu_encoder_phys_cmd_is_master(phys_enc))
- return rc;
-
- wait_info.wq = &cmd_enc->pending_vblank_wq;
- wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
- wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
-
- atomic_inc(&cmd_enc->pending_vblank_cnt);
-
- rc = dpu_encoder_helper_wait_for_irq(phys_enc,
- phys_enc->irq[INTR_IDX_RDPTR],
- dpu_encoder_phys_cmd_te_rd_ptr_irq,
- &wait_info);
-
- return rc;
-}
-
static void dpu_encoder_phys_cmd_handle_post_kickoff(
struct dpu_encoder_phys *phys_enc)
{
@@ -731,12 +707,10 @@ static void dpu_encoder_phys_cmd_init_ops(
ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
ops->enable = dpu_encoder_phys_cmd_enable;
ops->disable = dpu_encoder_phys_cmd_disable;
- ops->destroy = dpu_encoder_phys_cmd_destroy;
ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
- ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
ops->irq_control = dpu_encoder_phys_cmd_irq_control;
@@ -746,7 +720,7 @@ static void dpu_encoder_phys_cmd_init_ops(
ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}
-struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
@@ -754,7 +728,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
DPU_DEBUG("intf\n");
- cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+ cmd_enc = drmm_kzalloc(dev, sizeof(*cmd_enc), GFP_KERNEL);
if (!cmd_enc) {
DPU_ERROR("failed to allocate\n");
return ERR_PTR(-ENOMEM);
@@ -763,6 +737,9 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
dpu_encoder_phys_init(phys_enc, p);
+ mutex_init(&phys_enc->vblank_ctl_lock);
+ phys_enc->vblank_refcount = 0;
+
dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
phys_enc->intf_mode = INTF_MODE_CMD;
cmd_enc->stream_sel = 0;
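The vblank refcount conversion here (mirrored in the vid encoder below) trades atomic_inc_return()/atomic_dec_return() for a plain int under vblank_ctl_lock. The point is ordering, not counting: the count only moves after the IRQ callback (un)registration succeeded, and both steps happen under one lock, so a concurrent enable/disable can no longer observe a count that disagrees with the registration state. Skeleton of the pattern, arguments and error paths elided:

mutex_lock(&phys_enc->vblank_ctl_lock);
if (enable) {
	if (phys_enc->vblank_refcount == 0)
		ret = dpu_core_irq_register_callback(...);	/* 0 -> 1 */
	if (!ret)
		phys_enc->vblank_refcount++;
} else {
	if (phys_enc->vblank_refcount == 1)
		ret = dpu_core_irq_unregister_callback(...);	/* 1 -> 0 */
	if (!ret)
		phys_enc->vblank_refcount--;
}
mutex_unlock(&phys_enc->vblank_ctl_lock);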
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index a01fda711883..d0f56c5c4cce 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -11,6 +11,8 @@
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"
+#include <drm/drm_managed.h>
+
#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->parent ? \
(e)->parent->base.id : -1, \
@@ -364,7 +366,8 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
int ret = 0;
int refcount;
- refcount = atomic_read(&phys_enc->vblank_refcount);
+ mutex_lock(&phys_enc->vblank_ctl_lock);
+ refcount = phys_enc->vblank_refcount;
/* Slave encoders don't report vblank */
if (!dpu_encoder_phys_vid_is_master(phys_enc))
@@ -377,18 +380,26 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
}
DRM_DEBUG_VBL("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
- atomic_read(&phys_enc->vblank_refcount));
+ refcount);
- if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
- ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_VSYNC],
- dpu_encoder_phys_vid_vblank_irq,
- phys_enc);
- else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
- ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_VSYNC]);
+ if (enable) {
+ if (phys_enc->vblank_refcount == 0)
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_VSYNC],
+ dpu_encoder_phys_vid_vblank_irq,
+ phys_enc);
+ if (!ret)
+ phys_enc->vblank_refcount++;
+ } else if (!enable) {
+ if (phys_enc->vblank_refcount == 1)
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_VSYNC]);
+ if (!ret)
+ phys_enc->vblank_refcount--;
+ }
end:
+ mutex_unlock(&phys_enc->vblank_ctl_lock);
if (ret) {
DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
DRMID(phys_enc->parent),
@@ -438,13 +449,7 @@ skip_flush:
phys_enc->enable_state = DPU_ENC_ENABLING;
}
-static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
-{
- DPU_DEBUG_VIDENC(phys_enc, "\n");
- kfree(phys_enc);
-}
-
-static int dpu_encoder_phys_vid_wait_for_vblank(
+static int dpu_encoder_phys_vid_wait_for_tx_complete(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_wait_info wait_info;
@@ -558,7 +563,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
* scanout buffer) don't latch properly..
*/
if (dpu_encoder_phys_vid_is_master(phys_enc)) {
- ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
+ ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
if (ret) {
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
@@ -578,7 +583,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
dpu_encoder_phys_inc_pending(phys_enc);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
- ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
+ ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
if (ret) {
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
@@ -618,7 +623,7 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_intf->idx - INTF_0,
enable,
- atomic_read(&phys_enc->vblank_refcount));
+ phys_enc->vblank_refcount);
if (enable) {
ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
@@ -681,11 +686,9 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set;
ops->enable = dpu_encoder_phys_vid_enable;
ops->disable = dpu_encoder_phys_vid_disable;
- ops->destroy = dpu_encoder_phys_vid_destroy;
ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
- ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
- ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_tx_complete;
ops->irq_control = dpu_encoder_phys_vid_irq_control;
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
@@ -694,7 +697,7 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
}
-struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
@@ -704,7 +707,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
return ERR_PTR(-EINVAL);
}
- phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
+ phys_enc = drmm_kzalloc(dev, sizeof(*phys_enc), GFP_KERNEL);
if (!phys_enc) {
DPU_ERROR("failed to create encoder due to memory allocation error\n");
return ERR_PTR(-ENOMEM);
@@ -713,6 +716,8 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
DPU_DEBUG_VIDENC(phys_enc, "\n");
dpu_encoder_phys_init(phys_enc, p);
+ mutex_init(&phys_enc->vblank_ctl_lock);
+ phys_enc->vblank_refcount = 0;
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
phys_enc->intf_mode = INTF_MODE_VIDEO;
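
The vblank conversion above replaces an atomic_t refcount with a plain integer guarded by vblank_ctl_lock, so the 0/1 transition and the IRQ (un)registration it triggers can no longer race against each other. A generic sketch of the pattern, with illustrative names rather than the driver's own:

#include <linux/mutex.h>
#include <linux/types.h>

struct vblank_ctl {
	struct mutex lock;	/* guards refcount and IRQ registration */
	int refcount;
};

static int vblank_ctl_set(struct vblank_ctl *v, bool enable,
			  int (*reg)(void *), int (*unreg)(void *), void *arg)
{
	int ret = 0;

	mutex_lock(&v->lock);
	if (enable) {
		/* register the callback only on the 0 -> 1 transition */
		if (v->refcount == 0)
			ret = reg(arg);
		if (!ret)
			v->refcount++;
	} else {
		/* unregister only on the 1 -> 0 transition */
		if (v->refcount == 1)
			ret = unreg(arg);
		if (!ret)
			v->refcount--;
	}
	mutex_unlock(&v->lock);

	return ret;
}
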
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 0b6a761d68b7..4cd2d9e3131a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_managed.h>
#include "dpu_encoder_phys.h"
#include "dpu_formats.h"
@@ -206,13 +207,14 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
}
/**
- * dpu_encoder_phys_wb_setup_cdp - setup chroma down prefetch block
+ * dpu_encoder_phys_wb_setup_ctl - setup wb pipeline for ctl path
 * @phys_enc: Pointer to physical encoder
*/
-static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
+static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb;
struct dpu_hw_ctl *ctl;
+ struct dpu_hw_cdm *hw_cdm;
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
@@ -221,6 +223,7 @@ static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
hw_wb = phys_enc->hw_wb;
ctl = phys_enc->hw_ctl;
+ hw_cdm = phys_enc->hw_cdm;
if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) &&
(phys_enc->hw_ctl &&
@@ -237,6 +240,9 @@ static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
if (mode_3d && hw_pp && hw_pp->merge_3d)
intf_cfg.merge_3d = hw_pp->merge_3d->idx;
+ if (hw_cdm)
+ intf_cfg.cdm = hw_cdm->idx;
+
if (phys_enc->hw_pp->merge_3d && phys_enc->hw_pp->merge_3d->ops.setup_3d_mode)
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
mode_3d);
@@ -259,6 +265,96 @@ static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
}
/**
+ * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block
+ * This API does not handle DPU_CHROMA_H1V2.
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_cdm *hw_cdm;
+ struct dpu_hw_cdm_cfg *cdm_cfg;
+ struct dpu_hw_pingpong *hw_pp;
+ struct dpu_encoder_phys_wb *wb_enc;
+ const struct msm_format *format;
+ const struct dpu_format *dpu_fmt;
+ struct drm_writeback_job *wb_job;
+ int ret;
+
+ if (!phys_enc)
+ return;
+
+ wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ cdm_cfg = &wb_enc->cdm_cfg;
+ hw_pp = phys_enc->hw_pp;
+ hw_cdm = phys_enc->hw_cdm;
+ wb_job = wb_enc->wb_job;
+
+ format = msm_framebuffer_format(wb_enc->wb_job->fb);
+ dpu_fmt = dpu_get_dpu_format_ext(format->pixel_format, wb_job->fb->modifier);
+
+ if (!hw_cdm)
+ return;
+
+ if (!DPU_FORMAT_IS_YUV(dpu_fmt)) {
+ DPU_DEBUG("[enc:%d] cdm_disable fmt:%x\n", DRMID(phys_enc->parent),
+ dpu_fmt->base.pixel_format);
+ if (hw_cdm->ops.bind_pingpong_blk)
+ hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE);
+
+ return;
+ }
+
+ memset(cdm_cfg, 0, sizeof(struct dpu_hw_cdm_cfg));
+
+ cdm_cfg->output_width = wb_job->fb->width;
+ cdm_cfg->output_height = wb_job->fb->height;
+ cdm_cfg->output_fmt = dpu_fmt;
+ cdm_cfg->output_type = CDM_CDWN_OUTPUT_WB;
+ cdm_cfg->output_bit_depth = DPU_FORMAT_IS_DX(dpu_fmt) ?
+ CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
+ cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l;
+
+ /* enable 10 bit logic */
+ switch (cdm_cfg->output_fmt->chroma_sample) {
+ case DPU_CHROMA_RGB:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ case DPU_CHROMA_H2V1:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ case DPU_CHROMA_420:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
+ break;
+ case DPU_CHROMA_H1V2:
+ default:
+ DPU_ERROR("[enc:%d] unsupported chroma sampling type\n",
+ DRMID(phys_enc->parent));
+ cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ }
+
+ DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
+ DRMID(phys_enc->parent), cdm_cfg->output_width,
+ cdm_cfg->output_height, cdm_cfg->output_fmt->base.pixel_format,
+ cdm_cfg->output_type, cdm_cfg->output_bit_depth,
+ cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type);
+
+ if (hw_cdm->ops.enable) {
+ cdm_cfg->pp_id = hw_pp->idx;
+ ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
+ if (ret < 0) {
+ DPU_ERROR("[enc:%d] failed to enable CDM; ret:%d\n",
+ DRMID(phys_enc->parent), ret);
+ return;
+ }
+ }
+}
+
+/**
* dpu_encoder_phys_wb_atomic_check - verify and fixup given atomic states
* @phys_enc: Pointer to physical encoder
* @crtc_state: Pointer to CRTC atomic state
@@ -307,7 +403,7 @@ static int dpu_encoder_phys_wb_atomic_check(
return -EINVAL;
}
- return 0;
+ return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);
}
@@ -320,6 +416,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
struct dpu_hw_wb *hw_wb;
struct dpu_hw_ctl *hw_ctl;
struct dpu_hw_pingpong *hw_pp;
+ struct dpu_hw_cdm *hw_cdm;
u32 pending_flush = 0;
if (!phys_enc)
@@ -328,6 +425,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
hw_wb = phys_enc->hw_wb;
hw_pp = phys_enc->hw_pp;
hw_ctl = phys_enc->hw_ctl;
+ hw_cdm = phys_enc->hw_cdm;
DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
@@ -343,6 +441,9 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl,
hw_pp->merge_3d->idx);
+ if (hw_cdm && hw_ctl->ops.update_pending_flush_cdm)
+ hw_ctl->ops.update_pending_flush_cdm(hw_ctl, hw_cdm->idx);
+
if (hw_ctl->ops.get_pending_flush)
pending_flush = hw_ctl->ops.get_pending_flush(hw_ctl);
@@ -374,8 +475,9 @@ static void dpu_encoder_phys_wb_setup(
dpu_encoder_phys_wb_setup_fb(phys_enc, fb);
- dpu_encoder_phys_wb_setup_cdp(phys_enc);
+ dpu_encoder_helper_phys_setup_cdm(phys_enc);
+ dpu_encoder_phys_wb_setup_ctl(phys_enc);
}
/**
@@ -580,20 +682,6 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
phys_enc->enable_state = DPU_ENC_DISABLED;
}
-/**
- * dpu_encoder_phys_wb_destroy - destroy writeback encoder
- * @phys_enc: Pointer to physical encoder
- */
-static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
-{
- if (!phys_enc)
- return;
-
- DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
-
- kfree(phys_enc);
-}
-
static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc,
struct drm_writeback_job *job)
{
@@ -689,7 +777,6 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set;
ops->enable = dpu_encoder_phys_wb_enable;
ops->disable = dpu_encoder_phys_wb_disable;
- ops->destroy = dpu_encoder_phys_wb_destroy;
ops->atomic_check = dpu_encoder_phys_wb_atomic_check;
ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done;
ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff;
@@ -705,9 +792,10 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
/**
* dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @dev: Corresponding device for devres management
* @p: Pointer to init info structure with initialization params
*/
-struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
@@ -720,7 +808,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
return ERR_PTR(-EINVAL);
}
- wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
+ wb_enc = drmm_kzalloc(dev, sizeof(*wb_enc), GFP_KERNEL);
if (!wb_enc) {
DPU_ERROR("failed to allocate wb phys_enc enc\n");
return ERR_PTR(-ENOMEM);
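
The chroma-sample switch in dpu_encoder_helper_phys_setup_cdm() above boils down to a small mapping from the framebuffer's subsampling to the CDM downscaler modes. A hedged restatement of just that mapping; the enum values come from dpu_hw_mdss.h and the new dpu_hw_cdm.h, and cdm_pick_cdwn is an illustrative helper, not driver code:

static void cdm_pick_cdwn(u32 chroma_sample, u32 *h_type, u32 *v_type)
{
	switch (chroma_sample) {
	case DPU_CHROMA_RGB:	/* no subsampling: both directions off */
		*h_type = CDM_CDWN_DISABLE;
		*v_type = CDM_CDWN_DISABLE;
		break;
	case DPU_CHROMA_H2V1:	/* 4:2:2: halve chroma horizontally only */
		*h_type = CDM_CDWN_COSITE;
		*v_type = CDM_CDWN_DISABLE;
		break;
	case DPU_CHROMA_420:	/* 4:2:0: halve chroma in both directions */
		*h_type = CDM_CDWN_COSITE;
		*v_type = CDM_CDWN_OFFSITE;
		break;
	default:		/* DPU_CHROMA_H1V2 is not handled by this path */
		*h_type = CDM_CDWN_DISABLE;
		*v_type = CDM_CDWN_DISABLE;
		break;
	}
}
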
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index a1aada630780..54e8717403a0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -22,23 +22,14 @@
BIT(DPU_SSPP_CSC_10BIT))
#define VIG_MSM8998_MASK \
- (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
#define VIG_SDM845_MASK \
- (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3))
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
#define VIG_SDM845_MASK_SDMA \
(VIG_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
-#define VIG_SC7180_MASK \
- (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
-
-#define VIG_SM6125_MASK \
- (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
-
-#define VIG_SC7180_MASK_SDMA \
- (VIG_SC7180_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
-
#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
#define DMA_MSM8998_MASK \
@@ -47,7 +38,7 @@
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
#define VIG_SC7280_MASK \
- (VIG_SC7180_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
+ (VIG_SDM845_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
#define VIG_SC7280_MASK_SDMA \
(VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
@@ -211,7 +202,7 @@ static const u32 rotation_v2_formats[] = {
/* TODO add formats after validation */
};
-static const uint32_t wb2_formats[] = {
+static const u32 wb2_formats_rgb[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
DRM_FORMAT_RGB888,
@@ -245,21 +236,56 @@ static const uint32_t wb2_formats[] = {
DRM_FORMAT_XBGR4444,
};
+static const u32 wb2_formats_rgb_yuv[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_BGRX4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_NV12,
+};
+
/*************************************************************
* SSPP sub blocks config
*************************************************************/
+#define SSPP_SCALER_VER(maj, min) (((maj) << 16) | (min))
+
/* SSPP common configuration */
-#define _VIG_SBLK(sdma_pri, qseed_ver) \
+#define _VIG_SBLK(scaler_ver) \
{ \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
.maxupscale = MAX_UPSCALE_RATIO, \
- .smart_dma_priority = sdma_pri, \
.scaler_blk = {.name = "scaler", \
- .id = qseed_ver, \
+ .version = scaler_ver, \
.base = 0xa00, .len = 0xa0,}, \
.csc_blk = {.name = "csc", \
- .id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
@@ -268,16 +294,14 @@ static const uint32_t wb2_formats[] = {
.rotation_cfg = NULL, \
}
-#define _VIG_SBLK_ROT(sdma_pri, qseed_ver, rot_cfg) \
+#define _VIG_SBLK_ROT(scaler_ver, rot_cfg) \
{ \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
.maxupscale = MAX_UPSCALE_RATIO, \
- .smart_dma_priority = sdma_pri, \
.scaler_blk = {.name = "scaler", \
- .id = qseed_ver, \
+ .version = scaler_ver, \
.base = 0xa00, .len = 0xa0,}, \
.csc_blk = {.name = "csc", \
- .id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
@@ -286,91 +310,64 @@ static const uint32_t wb2_formats[] = {
.rotation_cfg = rot_cfg, \
}
-#define _DMA_SBLK(sdma_pri) \
+#define _VIG_SBLK_NOSCALE() \
+ { \
+ .maxdwnscale = SSPP_UNITY_SCALE, \
+ .maxupscale = SSPP_UNITY_SCALE, \
+ .format_list = plane_formats_yuv, \
+ .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+ .virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
+ }
+
+#define _DMA_SBLK() \
{ \
.maxdwnscale = SSPP_UNITY_SCALE, \
.maxupscale = SSPP_UNITY_SCALE, \
- .smart_dma_priority = sdma_pri, \
.format_list = plane_formats, \
.num_formats = ARRAY_SIZE(plane_formats), \
.virt_format_list = plane_formats, \
.virt_num_formats = ARRAY_SIZE(plane_formats), \
}
-static const struct dpu_sspp_sub_blks msm8998_vig_sblk_0 =
- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
-static const struct dpu_sspp_sub_blks msm8998_vig_sblk_1 =
- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
-static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 =
- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
-static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 =
- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
-
static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
.rot_maxheight = 1088,
.rot_num_formats = ARRAY_SIZE(rotation_v2_formats),
.rot_format_list = rotation_v2_formats,
};
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
- _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
- _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 =
- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 =
- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3);
-
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK(1);
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK(2);
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK(3);
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK(4);
-
-static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
- _VIG_SBLK(4, DPU_SSPP_SCALER_QSEED4);
-
-static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 =
- _VIG_SBLK_ROT(4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2);
-
-static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 =
- _VIG_SBLK(2, DPU_SSPP_SCALER_QSEED4);
-
-static const struct dpu_sspp_sub_blks sm6125_vig_sblk_0 =
- _VIG_SBLK(3, DPU_SSPP_SCALER_QSEED3LITE);
-
-static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
- _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
- _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
-
-static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 =
- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 =
- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 =
- _VIG_SBLK(9, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 =
- _VIG_SBLK(10, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK(5);
-static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK(6);
-
-#define _VIG_SBLK_NOSCALE(sdma_pri) \
- { \
- .maxdwnscale = SSPP_UNITY_SCALE, \
- .maxupscale = SSPP_UNITY_SCALE, \
- .smart_dma_priority = sdma_pri, \
- .format_list = plane_formats_yuv, \
- .num_formats = ARRAY_SIZE(plane_formats_yuv), \
- .virt_format_list = plane_formats, \
- .virt_num_formats = ARRAY_SIZE(plane_formats), \
- }
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_noscale =
+ _VIG_SBLK_NOSCALE();
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_2 =
+ _VIG_SBLK(SSPP_SCALER_VER(1, 2));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_3 =
+ _VIG_SBLK(SSPP_SCALER_VER(1, 3));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_4 =
+ _VIG_SBLK(SSPP_SCALER_VER(1, 4));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_2_4 =
+ _VIG_SBLK(SSPP_SCALER_VER(2, 4));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_0 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 0));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_0_rot_v2 =
+ _VIG_SBLK_ROT(SSPP_SCALER_VER(3, 0),
+ &dpu_rot_sc7280_cfg_v2);
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_1 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 1));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_2 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 2));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_3 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 3));
-static const struct dpu_sspp_sub_blks qcm2290_vig_sblk_0 = _VIG_SBLK_NOSCALE(2);
-static const struct dpu_sspp_sub_blks qcm2290_dma_sblk_0 = _DMA_SBLK(1);
+static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
/*************************************************************
* MIXER sub blocks config
@@ -422,12 +419,12 @@ static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
* DSPP sub blocks config
*************************************************************/
static const struct dpu_dspp_sub_blks msm8998_dspp_sblk = {
- .pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700,
+ .pcc = {.name = "pcc", .base = 0x1700,
.len = 0x90, .version = 0x10007},
};
static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
- .pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700,
+ .pcc = {.name = "pcc", .base = 0x1700,
.len = 0x90, .version = 0x40000},
};
@@ -435,19 +432,19 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
* PINGPONG sub blocks config
*************************************************************/
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
- .te2 = {.name = "te2", .id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+ .te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
.version = 0x1},
- .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .dither = {.name = "dither", .base = 0x30e0,
.len = 0x20, .version = 0x10000},
};
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
- .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .dither = {.name = "dither", .base = 0x30e0,
.len = 0x20, .version = 0x10000},
};
static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
- .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0xe0,
+ .dither = {.name = "dither", .base = 0xe0,
.len = 0x20, .version = 0x20000},
};
@@ -465,6 +462,16 @@ static const struct dpu_dsc_sub_blks dsc_sblk_1 = {
};
/*************************************************************
+ * CDM block config
+ *************************************************************/
+static const struct dpu_cdm_cfg sc7280_cdm = {
+ .name = "cdm_0",
+ .id = CDM_0,
+ .len = 0x228,
+ .base = 0x79200,
+};
+
+/*************************************************************
* VBIF sub blocks config
*************************************************************/
/* VBIF QOS remap */
@@ -472,6 +479,7 @@ static const u32 msm8998_rt_pri_lvl[] = {1, 2, 2, 2};
static const u32 msm8998_nrt_pri_lvl[] = {1, 1, 1, 1};
static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+static const u32 sm8650_rt_pri_lvl[] = {4, 4, 5, 5, 5, 5, 5, 6};
static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
{
@@ -558,6 +566,26 @@ static const struct dpu_vbif_cfg sm8550_vbif[] = {
},
};
+static const struct dpu_vbif_cfg sm8650_vbif[] = {
+ {
+ .name = "vbif_rt", .id = VBIF_RT,
+ .base = 0, .len = 0x1074,
+ .features = BIT(DPU_VBIF_QOS_REMAP),
+ .xin_halt_timeout = 0x4000,
+ .qos_rp_remap_size = 0x40,
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sm8650_rt_pri_lvl),
+ .priority_lvl = sm8650_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+ .priority_lvl = sdm845_nrt_pri_lvl,
+ },
+ .memtype_count = 16,
+ .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+ },
+};
+
/*************************************************************
* PERF data config
*************************************************************/
@@ -654,6 +682,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_3_0_msm8998.h"
#include "catalog/dpu_4_0_sdm845.h"
+#include "catalog/dpu_4_1_sdm670.h"
#include "catalog/dpu_5_0_sm8150.h"
#include "catalog/dpu_5_1_sc8180x.h"
@@ -673,3 +702,5 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_8_1_sm8450.h"
#include "catalog/dpu_9_0_sm8550.h"
+
+#include "catalog/dpu_10_0_sm8650.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index df024e10d3a3..ba82ef4560a6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -51,9 +51,7 @@ enum {
/**
* SSPP sub-blocks/features
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
- * @DPU_SSPP_SCALER_QSEED3, QSEED3 alogorithm support
- * @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite alogorithm support
- * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3_COMPATIBLE, QSEED3-compatible algorithm support (includes QSEED3, QSEED3LITE and QSEED4)
* @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
 * @DPU_SSPP_CSC, Support of Color space conversion
* @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
@@ -71,9 +69,7 @@ enum {
*/
enum {
DPU_SSPP_SCALER_QSEED2 = 0x1,
- DPU_SSPP_SCALER_QSEED3,
- DPU_SSPP_SCALER_QSEED3LITE,
- DPU_SSPP_SCALER_QSEED4,
+ DPU_SSPP_SCALER_QSEED3_COMPATIBLE,
DPU_SSPP_SCALER_RGB,
DPU_SSPP_CSC,
DPU_SSPP_CSC_10BIT,
@@ -249,49 +245,50 @@ enum {
unsigned long features
/**
- * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
- * @name: string name for debug purposes
- * @id: enum identifying this sub-block
- * @base: offset of this sub-block relative to the block
- * offset
- * @len register block length of this sub-block
- */
-#define DPU_HW_SUBBLK_INFO \
- char name[DPU_HW_BLK_NAME_LEN]; \
- u32 id; \
- u32 base; \
- u32 len
-
-/**
* struct dpu_scaler_blk: Scaler information
- * @info: HW register and features supported by this sub-blk
- * @version: qseed block revision
+ * @name: string name for debug purposes
+ * @base: offset of this sub-block relative to the block offset
+ * @len: register block length of this sub-block
+ * @version: qseed block revision, on QSEED3+ platforms this is the value of
+ * scaler_blk.base + QSEED3_HW_VERSION registers.
*/
struct dpu_scaler_blk {
- DPU_HW_SUBBLK_INFO;
+ char name[DPU_HW_BLK_NAME_LEN];
+ u32 base;
+ u32 len;
u32 version;
};
struct dpu_csc_blk {
- DPU_HW_SUBBLK_INFO;
+ char name[DPU_HW_BLK_NAME_LEN];
+ u32 base;
+ u32 len;
};
/**
* struct dpu_pp_blk : Pixel processing sub-blk information
- * @info: HW register and features supported by this sub-blk
+ * @name: string name for debug purposes
+ * @base: offset of this sub-block relative to the block offset
+ * @len: register block length of this sub-block
* @version: HW Algorithm version
*/
struct dpu_pp_blk {
- DPU_HW_SUBBLK_INFO;
+ char name[DPU_HW_BLK_NAME_LEN];
+ u32 base;
+ u32 len;
u32 version;
};
/**
* struct dpu_dsc_blk - DSC Encoder sub-blk information
- * @info: HW register and features supported by this sub-blk
+ * @name: string name for debug purposes
+ * @base: offset of this sub-block relative to the block offset
+ * @len: register block length of this sub-block
*/
struct dpu_dsc_blk {
- DPU_HW_SUBBLK_INFO;
+ char name[DPU_HW_BLK_NAME_LEN];
+ u32 base;
+ u32 len;
};
/**
@@ -341,7 +338,6 @@ struct dpu_rotation_cfg {
* @max_mixer_width max layer mixer line width support.
* @max_mixer_blendstages max layer mixer blend stages or
* supported z order
- * @qseed_type qseed2 or qseed3 support.
* @has_src_split source split feature status
* @has_dim_layer dim layer feature status
* @has_idle_pc indicate if idle power collapse feature is supported
@@ -354,7 +350,6 @@ struct dpu_rotation_cfg {
struct dpu_caps {
u32 max_mixer_width;
u32 max_mixer_blendstages;
- u32 qseed_type;
bool has_src_split;
bool has_dim_layer;
bool has_idle_pc;
@@ -371,7 +366,6 @@ struct dpu_caps {
* common: Pointer to common configurations shared by sub blocks
* @maxdwnscale: max downscale ratio supported(without DECIMATION)
* @maxupscale: maxupscale ratio supported
- * @smart_dma_priority: hw priority of rect1 of multirect pipe
* @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
* @qseed_ver: qseed version
* @scaler_blk:
@@ -385,7 +379,6 @@ struct dpu_caps {
struct dpu_sspp_sub_blks {
u32 maxdwnscale;
u32 maxupscale;
- u32 smart_dma_priority;
u32 max_per_pipe_bw;
u32 qseed_ver;
struct dpu_scaler_blk scaler_blk;
@@ -690,6 +683,17 @@ struct dpu_vbif_cfg {
};
/**
+ * struct dpu_cdm_cfg - information of chroma down blocks
+ * @name string name for debug purposes
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ */
+struct dpu_cdm_cfg {
+ DPU_HW_BLK_INFO;
+};
+
+/**
* Define CDP use cases
 * @DPU_PERF_CDP_USAGE_RT: real-time use cases
* @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
@@ -812,6 +816,8 @@ struct dpu_mdss_cfg {
u32 wb_count;
const struct dpu_wb_cfg *wb;
+ const struct dpu_cdm_cfg *cdm;
+
u32 ad_count;
u32 dspp_count;
@@ -827,6 +833,7 @@ struct dpu_mdss_cfg {
extern const struct dpu_mdss_cfg dpu_msm8998_cfg;
extern const struct dpu_mdss_cfg dpu_sdm845_cfg;
+extern const struct dpu_mdss_cfg dpu_sdm670_cfg;
extern const struct dpu_mdss_cfg dpu_sm8150_cfg;
extern const struct dpu_mdss_cfg dpu_sc8180x_cfg;
extern const struct dpu_mdss_cfg dpu_sm8250_cfg;
@@ -841,5 +848,6 @@ extern const struct dpu_mdss_cfg dpu_sc7280_cfg;
extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg;
extern const struct dpu_mdss_cfg dpu_sm8450_cfg;
extern const struct dpu_mdss_cfg dpu_sm8550_cfg;
+extern const struct dpu_mdss_cfg dpu_sm8650_cfg;
#endif /* _DPU_HW_CATALOG_H */
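
With DPU_HW_SUBBLK_INFO gone, each sub-block struct above spells out its fields directly and drops the unused .id. A hypothetical catalog entry under the new layout; example_scaler is illustrative only and assumes struct dpu_scaler_blk and SSPP_SCALER_VER() are in scope:

static const struct dpu_scaler_blk example_scaler = {
	.name = "scaler",
	.base = 0xa00,	/* offset relative to the owning SSPP block */
	.len = 0xa0,
	.version = SSPP_SCALER_VER(3, 0), /* matches QSEED3_HW_VERSION readback */
};
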
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
new file mode 100644
index 000000000000..e9cdc7934a49
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+
+#include <drm/drm_managed.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_kms.h"
+
+#define CDM_CSC_10_OPMODE 0x000
+#define CDM_CSC_10_BASE 0x004
+
+#define CDM_CDWN2_OP_MODE 0x100
+#define CDM_CDWN2_CLAMP_OUT 0x104
+#define CDM_CDWN2_PARAMS_3D_0 0x108
+#define CDM_CDWN2_PARAMS_3D_1 0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0 0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1 0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2 0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
+#define CDM_CDWN2_COEFF_COSITE_V 0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C
+#define CDM_CDWN2_OUT_SIZE 0x130
+
+#define CDM_HDMI_PACK_OP_MODE 0x200
+#define CDM_CSC_10_MATRIX_COEFF_0 0x004
+
+#define CDM_MUX 0x224
+
+/* CDM CDWN2 sub-block bit definitions */
+#define CDM_CDWN2_OP_MODE_EN BIT(0)
+#define CDM_CDWN2_OP_MODE_ENABLE_H BIT(1)
+#define CDM_CDWN2_OP_MODE_ENABLE_V BIT(2)
+#define CDM_CDWN2_OP_MODE_BITS_OUT_8BIT BIT(7)
+#define CDM_CDWN2_V_PIXEL_METHOD_MASK GENMASK(6, 5)
+#define CDM_CDWN2_H_PIXEL_METHOD_MASK GENMASK(4, 3)
+
+/* CDM CSC10 sub-block bit definitions */
+#define CDM_CSC10_OP_MODE_EN BIT(0)
+#define CDM_CSC10_OP_MODE_SRC_FMT_YUV BIT(1)
+#define CDM_CSC10_OP_MODE_DST_FMT_YUV BIT(2)
+
+/* CDM HDMI pack sub-block bit definitions */
+#define CDM_HDMI_PACK_OP_MODE_EN BIT(0)
+
+/*
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ */
+static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/*
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/*
+ * Vertical coefficients for cosite chroma downscale
+ */
+static u32 cosite_v_coeff[] = {0x00080004};
+/*
+ * Vertical coefficients for offsite chroma downscale
+ */
+static u32 offsite_v_coeff[] = {0x00060002};
+
+static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 opmode;
+ u32 out_size;
+
+ switch (cfg->h_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ opmode = 0;
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ opmode = CDM_CDWN2_OP_MODE_ENABLE_H |
+ FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK,
+ CDM_CDWN2_METHOD_PIXEL_DROP);
+ break;
+ case CDM_CDWN_AVG:
+ opmode = CDM_CDWN2_OP_MODE_ENABLE_H |
+ FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK,
+ CDM_CDWN2_METHOD_AVG);
+ break;
+ case CDM_CDWN_COSITE:
+ opmode = CDM_CDWN2_OP_MODE_ENABLE_H |
+ FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK,
+ CDM_CDWN2_METHOD_COSITE);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+ cosite_h_coeff[0]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+ cosite_h_coeff[1]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+ cosite_h_coeff[2]);
+ break;
+ case CDM_CDWN_OFFSITE:
+ opmode = CDM_CDWN2_OP_MODE_ENABLE_H |
+ FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK, CDM_CDWN2_METHOD_OFFSITE);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+ offsite_h_coeff[0]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+ offsite_h_coeff[1]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+ offsite_h_coeff[2]);
+ break;
+ default:
+ DPU_ERROR("%s invalid horz down sampling type\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (cfg->v_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ /* if it's only horizontal downsample, we don't need to do anything here */
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ opmode |= CDM_CDWN2_OP_MODE_ENABLE_V |
+ FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK,
+ CDM_CDWN2_METHOD_PIXEL_DROP);
+ break;
+ case CDM_CDWN_AVG:
+ opmode |= CDM_CDWN2_OP_MODE_ENABLE_V |
+ FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK,
+ CDM_CDWN2_METHOD_AVG);
+ break;
+ case CDM_CDWN_COSITE:
+ opmode |= CDM_CDWN2_OP_MODE_ENABLE_V |
+ FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK,
+ CDM_CDWN2_METHOD_COSITE);
+ DPU_REG_WRITE(c,
+ CDM_CDWN2_COEFF_COSITE_V,
+ cosite_v_coeff[0]);
+ break;
+ case CDM_CDWN_OFFSITE:
+ opmode |= CDM_CDWN2_OP_MODE_ENABLE_V |
+ FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK,
+ CDM_CDWN2_METHOD_OFFSITE);
+ DPU_REG_WRITE(c,
+ CDM_CDWN2_COEFF_OFFSITE_V,
+ offsite_v_coeff[0]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cfg->output_bit_depth != CDM_CDWN_OUTPUT_10BIT)
+ opmode |= CDM_CDWN2_OP_MODE_BITS_OUT_8BIT;
+
+ if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+ opmode |= CDM_CDWN2_OP_MODE_EN; /* EN CDWN module */
+ else
+ opmode &= ~CDM_CDWN2_OP_MODE_EN;
+
+ out_size = (cfg->output_width & 0xFFFF) | ((cfg->output_height & 0xFFFF) << 16);
+ DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+ DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
+ DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT, ((0x3FF << 16) | 0x0));
+
+ return 0;
+}
+
+static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg *cdm)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ const struct dpu_format *fmt;
+ u32 opmode = 0;
+ u32 csc = 0;
+
+ if (!ctx || !cdm)
+ return -EINVAL;
+
+ fmt = cdm->output_fmt;
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ return -EINVAL;
+
+ dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, cdm->csc_cfg, true);
+ dpu_hw_cdm_setup_cdwn(ctx, cdm);
+
+ if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+ if (fmt->chroma_sample != DPU_CHROMA_H1V2)
+ return -EINVAL; /* unsupported format */
+ opmode = CDM_HDMI_PACK_OP_MODE_EN;
+ opmode |= (fmt->chroma_sample << 1);
+ }
+
+ csc |= CDM_CSC10_OP_MODE_DST_FMT_YUV;
+ csc &= ~CDM_CSC10_OP_MODE_SRC_FMT_YUV;
+ csc |= CDM_CSC10_OP_MODE_EN;
+
+ if (ctx && ctx->ops.bind_pingpong_blk)
+ ctx->ops.bind_pingpong_blk(ctx, cdm->pp_id);
+
+ DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+ DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+ return 0;
+}
+
+static void dpu_hw_cdm_bind_pingpong_blk(struct dpu_hw_cdm *ctx, const enum dpu_pingpong pp)
+{
+ struct dpu_hw_blk_reg_map *c;
+ int mux_cfg;
+
+ c = &ctx->hw;
+
+ mux_cfg = DPU_REG_READ(c, CDM_MUX);
+ mux_cfg &= ~0xf;
+
+ if (pp)
+ mux_cfg |= (pp - PINGPONG_0) & 0x7;
+ else
+ mux_cfg |= 0xf;
+
+ DPU_REG_WRITE(c, CDM_MUX, mux_cfg);
+}
+
+struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev,
+ const struct dpu_cdm_cfg *cfg, void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev)
+{
+ struct dpu_hw_cdm *c;
+
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_CDM;
+
+ /* Assign ops */
+ c->idx = cfg->id;
+ c->caps = cfg;
+
+ c->ops.enable = dpu_hw_cdm_enable;
+ if (mdss_rev->core_major_ver >= 5)
+ c->ops.bind_pingpong_blk = dpu_hw_cdm_bind_pingpong_blk;
+
+ return c;
+}
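
A hedged sketch of how a caller might bring the new CDM object up for an NV12 writeback. The dev/mmio/mdss_rev/yuv_fmt parameters are assumed to come from the surrounding KMS init, and the sc7280_cdm catalog entry (static in dpu_hw_catalog.c) is assumed reachable here purely for illustration:

static int example_cdm_bringup(struct drm_device *dev, void __iomem *mmio,
			       const struct dpu_mdss_version *mdss_rev,
			       const struct dpu_format *yuv_fmt)
{
	struct dpu_hw_cdm *cdm;
	struct dpu_hw_cdm_cfg cfg = { };

	cdm = dpu_hw_cdm_init(dev, &sc7280_cdm, mmio, mdss_rev);
	if (IS_ERR(cdm))
		return PTR_ERR(cdm);

	cfg.output_width = 1920;
	cfg.output_height = 1080;
	cfg.output_fmt = yuv_fmt;		/* enable() rejects non-YUV */
	cfg.output_type = CDM_CDWN_OUTPUT_WB;
	cfg.output_bit_depth = CDM_CDWN_OUTPUT_8BIT;
	cfg.csc_cfg = &dpu_csc10_rgb2yuv_601l;
	cfg.h_cdwn_type = CDM_CDWN_COSITE;	/* NV12 is 4:2:0 */
	cfg.v_cdwn_type = CDM_CDWN_OFFSITE;
	cfg.pp_id = PINGPONG_0;

	return cdm->ops.enable ? cdm->ops.enable(cdm, &cfg) : -EOPNOTSUPP;
}
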
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
new file mode 100644
index 000000000000..348424df87c6
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_CDM_H
+#define _DPU_HW_CDM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+
+struct dpu_hw_cdm;
+
+/**
+ * struct dpu_hw_cdm_cfg : current configuration of CDM block
+ *
+ * @output_width: output ROI width of CDM block
+ * @output_height: output ROI height of CDM block
+ * @output_bit_depth: output bit-depth of CDM block
+ * @h_cdwn_type: downsample type used for horizontal pixels
+ * @v_cdwn_type: downsample type used for vertical pixels
+ * @output_fmt: handle to dpu_format of CDM block
+ * @csc_cfg: handle to CSC matrix programmed for CDM block
+ * @output_type: interface to which CDM is paired (HDMI/WB)
+ * @pp_id: ping-pong block to which CDM is bound to
+ */
+struct dpu_hw_cdm_cfg {
+ u32 output_width;
+ u32 output_height;
+ u32 output_bit_depth;
+ u32 h_cdwn_type;
+ u32 v_cdwn_type;
+ const struct dpu_format *output_fmt;
+ const struct dpu_csc_cfg *csc_cfg;
+ u32 output_type;
+ int pp_id;
+};
+
+/*
+ * These values are used to indicate which type of downsample is used
+ * in the horizontal/vertical direction for the CDM block.
+ */
+enum dpu_hw_cdwn_type {
+ CDM_CDWN_DISABLE,
+ CDM_CDWN_PIXEL_DROP,
+ CDM_CDWN_AVG,
+ CDM_CDWN_COSITE,
+ CDM_CDWN_OFFSITE,
+};
+
+/*
+ * CDM block can be paired with WB or HDMI block. These values match
+ * the input with which the CDM block is paired.
+ */
+enum dpu_hw_cdwn_output_type {
+ CDM_CDWN_OUTPUT_HDMI,
+ CDM_CDWN_OUTPUT_WB,
+};
+
+/*
+ * CDM block can give an 8-bit or 10-bit output. These values
+ * are used to indicate the output bit depth of CDM block
+ */
+enum dpu_hw_cdwn_output_bit_depth {
+ CDM_CDWN_OUTPUT_8BIT,
+ CDM_CDWN_OUTPUT_10BIT,
+};
+
+/*
+ * CDM block can downsample using different methods. These values
+ * are used to indicate the downsample method which can be used
+ * either in the horizontal or vertical direction.
+ */
+enum dpu_hw_cdwn_op_mode_method_h_v {
+ CDM_CDWN2_METHOD_PIXEL_DROP,
+ CDM_CDWN2_METHOD_AVG,
+ CDM_CDWN2_METHOD_COSITE,
+ CDM_CDWN2_METHOD_OFFSITE
+};
+
+/**
+ * struct dpu_hw_cdm_ops : Interface to the chroma down Hw driver functions
+ * Assumption is these functions will be called after
+ * clocks are enabled
+ * @enable: Enables the output to interface and programs the
+ * output packer
+ * @bind_pingpong_blk: enable/disable the connection with pingpong which
+ * will feed pixels to this cdm
+ */
+struct dpu_hw_cdm_ops {
+ /**
+ * Enable the CDM module
+ * @cdm Pointer to chroma down context
+ */
+ int (*enable)(struct dpu_hw_cdm *cdm, struct dpu_hw_cdm_cfg *cfg);
+
+ /**
+ * Enable/disable the connection with pingpong
+ * @cdm Pointer to chroma down context
+ * @pp pingpong block id.
+ */
+ void (*bind_pingpong_blk)(struct dpu_hw_cdm *cdm, const enum dpu_pingpong pp);
+};
+
+/**
+ * struct dpu_hw_cdm - cdm description
+ * @base: Hardware block base structure
+ * @hw: Block hardware details
+ * @idx: CDM index
+ * @caps: Pointer to cdm_cfg
+ * @ops: handle to operations possible for this CDM
+ */
+struct dpu_hw_cdm {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* chroma down */
+ const struct dpu_cdm_cfg *caps;
+ enum dpu_cdm idx;
+
+ /* ops */
+ struct dpu_hw_cdm_ops ops;
+};
+
+/**
+ * dpu_hw_cdm_init - initializes the cdm hw driver object.
+ * should be called once before accessing every CDM.
+ * @dev: DRM device handle
+ * @cdm: CDM catalog entry for which driver object is required
+ * @addr : mapped register io address of MDSS
+ * @mdss_rev: mdss hw core revision
+ */
+struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev,
+ const struct dpu_cdm_cfg *cdm, void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev);
+
+static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_cdm, base);
+}
+
+#endif /*_DPU_HW_CDM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 86182c734606..e76565c3e6a4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -4,6 +4,9 @@
*/
#include <linux/delay.h>
+
+#include <drm/drm_managed.h>
+
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
@@ -29,11 +32,13 @@
#define CTL_DSC_ACTIVE 0x0E8
#define CTL_WB_ACTIVE 0x0EC
#define CTL_INTF_ACTIVE 0x0F4
+#define CTL_CDM_ACTIVE 0x0F8
#define CTL_FETCH_PIPE_ACTIVE 0x0FC
#define CTL_MERGE_3D_FLUSH 0x100
#define CTL_DSC_FLUSH 0x104
#define CTL_WB_FLUSH 0x108
#define CTL_INTF_FLUSH 0x110
+#define CTL_CDM_FLUSH 0x114
#define CTL_INTF_MASTER 0x134
#define CTL_DSPP_n_FLUSH(n) ((0x13C) + ((n) * 4))
@@ -43,6 +48,7 @@
#define DPU_REG_RESET_TIMEOUT_US 2000
#define MERGE_3D_IDX 23
#define DSC_IDX 22
+#define CDM_IDX 26
#define INTF_IDX 31
#define WB_IDX 16
#define DSPP_IDX 29 /* From DPU hw rev 7.x.x */
@@ -104,6 +110,7 @@ static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
ctx->pending_wb_flush_mask = 0;
ctx->pending_merge_3d_flush_mask = 0;
ctx->pending_dsc_flush_mask = 0;
+ ctx->pending_cdm_flush_mask = 0;
memset(ctx->pending_dspp_flush_mask, 0,
sizeof(ctx->pending_dspp_flush_mask));
@@ -148,6 +155,10 @@ static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
ctx->pending_dsc_flush_mask);
+ if (ctx->pending_flush_mask & BIT(CDM_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_CDM_FLUSH,
+ ctx->pending_cdm_flush_mask);
+
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
@@ -279,6 +290,13 @@ static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
}
}
+static void dpu_hw_ctl_update_pending_flush_cdm(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
+{
+ /* update pending flush only if CDM_0 is flushed */
+ if (cdm_num == CDM_0)
+ ctx->pending_flush_mask |= BIT(CDM_IDX);
+}
+
static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
enum dpu_wb wb)
{
@@ -307,6 +325,12 @@ static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= BIT(DSC_IDX);
}
+static void dpu_hw_ctl_update_pending_flush_cdm_v1(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
+{
+ ctx->pending_cdm_flush_mask |= BIT(cdm_num - CDM_0);
+ ctx->pending_flush_mask |= BIT(CDM_IDX);
+}
+
static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
enum dpu_dspp dspp, u32 dspp_sub_blk)
{
@@ -540,6 +564,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if (cfg->dsc)
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
+
+ if (cfg->cdm)
+ DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
}
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
@@ -583,6 +610,7 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
u32 wb_active = 0;
u32 merge3d_active = 0;
u32 dsc_active;
+ u32 cdm_active;
/*
* This API resets each portion of the CTL path namely,
@@ -618,6 +646,12 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
dsc_active &= ~cfg->dsc;
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
}
+
+ if (cfg->cdm) {
+ cdm_active = DPU_REG_READ(c, CTL_CDM_ACTIVE);
+ cdm_active &= ~cfg->cdm;
+ DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cdm_active);
+ }
}
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
@@ -651,12 +685,14 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
ops->update_pending_flush_dsc =
dpu_hw_ctl_update_pending_flush_dsc_v1;
+ ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
} else {
ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf;
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
+ ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
}
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
@@ -680,14 +716,15 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
};
-struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
- void __iomem *addr,
- u32 mixer_count,
- const struct dpu_lm_cfg *mixer)
+struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
+ const struct dpu_ctl_cfg *cfg,
+ void __iomem *addr,
+ u32 mixer_count,
+ const struct dpu_lm_cfg *mixer)
{
struct dpu_hw_ctl *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -702,8 +739,3 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
return c;
}
-
-void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
-{
- kfree(ctx);
-}
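
The CDM flush plumbing above is two-level bookkeeping: the v1 helper records which CDM instance changed in pending_cdm_flush_mask and raises bit 26 (CDM_IDX) in the top-level mask, and trigger_flush_v1() later writes CTL_CDM_FLUSH before kicking CTL_FLUSH. A hedged, self-contained restatement with illustrative ex_ names:

#include <linux/bits.h>
#include <linux/types.h>

#define EX_CDM_IDX 26	/* top-level CTL_FLUSH bit covering all CDMs */

struct ex_ctl_state {
	u32 pending_flush_mask;		/* goes to CTL_FLUSH */
	u32 pending_cdm_flush_mask;	/* goes to CTL_CDM_FLUSH */
};

static void ex_pend_cdm_flush_v1(struct ex_ctl_state *s, unsigned int cdm_instance)
{
	/* per-instance bit: which CDM to flush (CDM_0-relative) */
	s->pending_cdm_flush_mask |= BIT(cdm_instance);
	/* summary bit: tells trigger_flush_v1() to write CTL_CDM_FLUSH */
	s->pending_flush_mask |= BIT(EX_CDM_IDX);
}
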
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 1c242298ff2e..ff85b5ee0acf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -39,6 +39,7 @@ struct dpu_hw_stage_cfg {
* @mode_3d: 3d mux configuration
* @merge_3d: 3d merge block used
* @intf_mode_sel: Interface mode, cmd / vid
+ * @cdm: CDM block used
* @stream_sel: Stream selection for multi-stream interfaces
* @dsc: DSC BIT masks used
*/
@@ -48,6 +49,7 @@ struct dpu_hw_intf_cfg {
enum dpu_3d_blend_mode mode_3d;
enum dpu_merge_3d merge_3d;
enum dpu_ctl_mode_sel intf_mode_sel;
+ enum dpu_cdm cdm;
int stream_sel;
unsigned int dsc;
};
@@ -167,6 +169,14 @@ struct dpu_hw_ctl_ops {
enum dpu_dsc blk);
/**
+ * OR in the given flushbits to the cached pending_(cdm_)flush_mask
+ * No effect on hardware
+ * @ctx: ctl path ctx pointer
+ * @cdm_num: idx of cdm to be flushed
+ */
+ void (*update_pending_flush_cdm)(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num);
+
+ /**
* Write the value of the pending_flush_mask to hardware
* @ctx : ctl path ctx pointer
*/
@@ -239,6 +249,7 @@ struct dpu_hw_ctl_ops {
* @pending_intf_flush_mask: pending INTF flush
* @pending_wb_flush_mask: pending WB flush
* @pending_dsc_flush_mask: pending DSC flush
+ * @pending_cdm_flush_mask: pending CDM flush
* @ops: operation list
*/
struct dpu_hw_ctl {
@@ -256,6 +267,7 @@ struct dpu_hw_ctl {
u32 pending_merge_3d_flush_mask;
u32 pending_dspp_flush_mask[DSPP_MAX - DSPP_0];
u32 pending_dsc_flush_mask;
+ u32 pending_cdm_flush_mask;
/* ops */
struct dpu_hw_ctl_ops ops;
@@ -274,20 +286,16 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
/**
* dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
* Should be called before accessing any ctl_path register.
+ * @dev: Corresponding device for devres management
* @cfg: ctl_path catalog entry for which driver object is required
* @addr: mapped register io address of MDP
* @mixer_count: Number of mixers in @mixer
* @mixer: Pointer to an array of Layer Mixers defined in the catalog
*/
-struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
- void __iomem *addr,
- u32 mixer_count,
- const struct dpu_lm_cfg *mixer);
-
-/**
- * dpu_hw_ctl_destroy(): Destroys ctl driver context
- * should be called to free the context
- */
-void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
+ const struct dpu_ctl_cfg *cfg,
+ void __iomem *addr,
+ u32 mixer_count,
+ const struct dpu_lm_cfg *mixer);
#endif /*_DPU_HW_CTL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index 509dbaa51d87..5e9aad1b2aa2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -3,6 +3,8 @@
* Copyright (c) 2020-2022, Linaro Limited
*/
+#include <drm/drm_managed.h>
+
#include <drm/display/drm_dsc_helper.h>
#include "dpu_kms.h"
@@ -188,12 +190,13 @@ static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
};
-struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg,
+struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
+ const struct dpu_dsc_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_dsc *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -206,8 +209,3 @@ struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg,
return c;
}
-
-void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc)
-{
- kfree(dsc);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index d5b597ab8c5c..989c88d2449b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -64,20 +64,24 @@ struct dpu_hw_dsc {
/**
* dpu_hw_dsc_init() - Initializes the DSC hw driver object.
+ * @dev: Corresponding device for devres management
* @cfg: DSC catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* Return: Error code or allocated dpu_hw_dsc context
*/
-struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg,
- void __iomem *addr);
+struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
+ const struct dpu_dsc_cfg *cfg,
+ void __iomem *addr);
/**
* dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object
+ * @dev: Corresponding device for devres management
* @cfg: DSC catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* Returns: Error code or allocated dpu_hw_dsc context
*/
-struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(const struct dpu_dsc_cfg *cfg,
+struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
+ const struct dpu_dsc_cfg *cfg,
void __iomem *addr);
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
index 24fe1d98eb86..ba193b0376fe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
@@ -4,6 +4,8 @@
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved
*/
+#include <drm/drm_managed.h>
+
#include <drm/display/drm_dsc_helper.h>
#include "dpu_kms.h"
@@ -367,12 +369,13 @@ static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops,
ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk_1_2;
}
-struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(const struct dpu_dsc_cfg *cfg,
+struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
+ const struct dpu_dsc_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_dsc *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index 9419b2209af8..b1da88e2935f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
+#include <drm/drm_managed.h>
+
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_lm.h"
@@ -68,15 +70,16 @@ static void _setup_dspp_ops(struct dpu_hw_dspp *c,
c->ops.setup_pcc = dpu_setup_dspp_pcc;
}
-struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg,
- void __iomem *addr)
+struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev,
+ const struct dpu_dspp_cfg *cfg,
+ void __iomem *addr)
{
struct dpu_hw_dspp *c;
if (!addr)
return ERR_PTR(-EINVAL);
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -90,10 +93,3 @@ struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg,
return c;
}
-
-void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp)
-{
- kfree(dspp);
-}
-
-
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
index bea965681330..3b435690b6cc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
@@ -81,18 +81,14 @@ static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw)
/**
* dpu_hw_dspp_init() - Initializes the DSPP hw driver object.
* should be called once before accessing every DSPP.
+ * @dev: Corresponding device for devres management
* @cfg: DSPP catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* Return: pointer to structure or ERR_PTR
*/
-struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg,
- void __iomem *addr);
-
-/**
- * dpu_hw_dspp_destroy(): Destroys DSPP driver context
- * @dspp: Pointer to DSPP driver context
- */
-void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp);
+struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev,
+ const struct dpu_dspp_cfg *cfg,
+ void __iomem *addr);
#endif /*_DPU_HW_DSPP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 088807db2c83..946dd0135dff 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -6,6 +6,8 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
+#include <drm/drm_managed.h>
+
#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
@@ -472,8 +474,9 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms,
return intr_status;
}
-struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
- const struct dpu_mdss_cfg *m)
+struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_intr *intr;
unsigned int i;
@@ -481,7 +484,7 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
if (!addr || !m)
return ERR_PTR(-EINVAL);
- intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ intr = drmm_kzalloc(dev, sizeof(*intr), GFP_KERNEL);
if (!intr)
return ERR_PTR(-ENOMEM);
@@ -512,11 +515,6 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
return intr;
}
-void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
-{
- kfree(intr);
-}
-
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
unsigned int irq_idx,
void (*irq_cb)(void *arg),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 53a21ebc57e8..564b750a28fe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -70,15 +70,12 @@ struct dpu_hw_intr {
/**
* dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @dev: Corresponding device for devres management
* @addr: mapped register io address of MDP
* @m: pointer to MDSS catalog data
*/
-struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
- const struct dpu_mdss_cfg *m);
+struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
-/**
- * dpu_hw_intr_destroy(): Cleanup interrutps hw object
- * @intr: pointer to interrupts hw object
- */
-void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index e8b8908d3e12..6bba531d6dc4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
@@ -12,6 +12,8 @@
#include <linux/iopoll.h>
+#include <drm/drm_managed.h>
+
#define INTF_TIMING_ENGINE_EN 0x000
#define INTF_CONFIG 0x004
#define INTF_HSYNC_CTL 0x008
@@ -318,9 +320,9 @@ static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
return DPU_REG_READ(c, INTF_LINE_COUNT);
}
-static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf, bool enable, u32 frame_count)
+static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf)
{
- dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, enable, frame_count);
+ dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, 0x1);
}
static int dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf, u32 *misr_value)
@@ -527,8 +529,10 @@ static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *ctx,
DPU_REG_WRITE(&ctx->hw, INTF_CONFIG2, intf_cfg2);
}
-struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
+struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
+ const struct dpu_intf_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_intf *c;
@@ -537,7 +541,7 @@ struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
return NULL;
}
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -581,9 +585,3 @@ struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
return c;
}
-
-void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
-{
- kfree(intf);
-}
-
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index c539025c418b..0bd57a32144a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
@@ -95,7 +95,7 @@ struct dpu_hw_intf_ops {
void (*bind_pingpong_blk)(struct dpu_hw_intf *intf,
const enum dpu_pingpong pp);
- void (*setup_misr)(struct dpu_hw_intf *intf, bool enable, u32 frame_count);
+ void (*setup_misr)(struct dpu_hw_intf *intf);
int (*collect_misr)(struct dpu_hw_intf *intf, u32 *misr_value);
// Tearcheck on INTF since DPU 5.0.0
@@ -131,17 +131,14 @@ struct dpu_hw_intf {
/**
* dpu_hw_intf_init() - Initializes the INTF driver for the passed
* interface catalog entry.
+ * @dev: Corresponding device for devres management
* @cfg: interface catalog entry for which driver object is required
* @addr: mapped register io address of MDP
* @mdss_rev: dpu core's major and minor versions
*/
-struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
-
-/**
- * dpu_hw_intf_destroy(): Destroys INTF driver context
- * @intf: Pointer to INTF driver context
- */
-void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
+struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
+ const struct dpu_intf_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev);
#endif /*_DPU_HW_INTF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index d1c3bd8379ea..1d3ccf3228c6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
+#include <drm/drm_managed.h>
+
#include "dpu_kms.h"
#include "dpu_hw_catalog.h"
#include "dpu_hwio.h"
@@ -81,9 +83,9 @@ static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
}
}
-static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count)
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx)
{
- dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, enable, frame_count);
+ dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, 0x0);
}
static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value)
@@ -156,8 +158,9 @@ static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops,
ops->collect_misr = dpu_hw_lm_collect_misr;
}
-struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
- void __iomem *addr)
+struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
+ const struct dpu_lm_cfg *cfg,
+ void __iomem *addr)
{
struct dpu_hw_mixer *c;
@@ -166,7 +169,7 @@ struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
return NULL;
}
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -180,8 +183,3 @@ struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
return c;
}
-
-void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
-{
- kfree(lm);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 36992d046a53..0a3381755249 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
@@ -57,7 +58,7 @@ struct dpu_hw_lm_ops {
/**
* setup_misr: Enable/disable MISR
*/
- void (*setup_misr)(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count);
+ void (*setup_misr)(struct dpu_hw_mixer *ctx);
/**
* collect_misr: Read MISR signature
@@ -95,16 +96,12 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
/**
* dpu_hw_lm_init() - Initializes the mixer hw driver object.
* should be called once before accessing every mixer.
+ * @dev: Corresponding device for devres management
* @cfg: mixer catalog entry for which driver object is required
* @addr: mapped register io address of MDP
*/
-struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
- void __iomem *addr);
-
-/**
- * dpu_hw_lm_destroy(): Destroys layer mixer driver context
- * @lm: Pointer to LM driver context
- */
-void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
+struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
+ const struct dpu_lm_cfg *cfg,
+ void __iomem *addr);
#endif /*_DPU_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index d85157acfbf8..5df545904057 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -98,6 +98,7 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_DSPP,
DPU_HW_BLK_MERGE_3D,
DPU_HW_BLK_DSC,
+ DPU_HW_BLK_CDM,
DPU_HW_BLK_MAX,
};
@@ -185,6 +186,11 @@ enum dpu_dsc {
DSC_MAX
};
+enum dpu_cdm {
+ CDM_0 = 1,
+ CDM_MAX
+};
+
enum dpu_pingpong {
PINGPONG_NONE,
PINGPONG_0,
@@ -195,6 +201,8 @@ enum dpu_pingpong {
PINGPONG_5,
PINGPONG_6,
PINGPONG_7,
+ PINGPONG_8,
+ PINGPONG_9,
PINGPONG_S0,
PINGPONG_MAX
};
@@ -204,6 +212,7 @@ enum dpu_merge_3d {
MERGE_3D_1,
MERGE_3D_2,
MERGE_3D_3,
+ MERGE_3D_4,
MERGE_3D_MAX
};
@@ -458,6 +467,7 @@ struct dpu_mdss_color {
#define DPU_DBG_MASK_ROT (1 << 9)
#define DPU_DBG_MASK_DSPP (1 << 10)
#define DPU_DBG_MASK_DSC (1 << 11)
+#define DPU_DBG_MASK_CDM (1 << 12)
/**
* struct dpu_hw_tear_check - Struct contains parameters to configure
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
index 90e0e05eff8d..ddfa40a959cb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
@@ -4,6 +4,8 @@
#include <linux/iopoll.h>
+#include <drm/drm_managed.h>
+
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
@@ -37,12 +39,13 @@ static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c,
c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode;
};
-struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg,
- void __iomem *addr)
+struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev,
+ const struct dpu_merge_3d_cfg *cfg,
+ void __iomem *addr)
{
struct dpu_hw_merge_3d *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -55,8 +58,3 @@ struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg,
return c;
}
-
-void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *hw)
-{
- kfree(hw);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
index 19cec5e88722..c192f02ec1ab 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
@@ -48,18 +48,13 @@ static inline struct dpu_hw_merge_3d *to_dpu_hw_merge_3d(struct dpu_hw_blk *hw)
/**
* dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed
* merge3d catalog entry.
+ * @dev: Corresponding device for devres management
* @cfg: Pingpong catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* Return: Error code or allocated dpu_hw_merge_3d context
*/
-struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg,
- void __iomem *addr);
-
-/**
- * dpu_hw_merge_3d_destroy - destroys merge_3d driver context
- * should be called to free the context
- * @pp: Pointer to PP driver context returned by dpu_hw_merge_3d_init
- */
-void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *pp);
+struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev,
+ const struct dpu_merge_3d_cfg *cfg,
+ void __iomem *addr);
#endif /*_DPU_HW_MERGE3D_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 057cac7f5d93..2db4c6fba37a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -4,6 +4,8 @@
#include <linux/iopoll.h>
+#include <drm/drm_managed.h>
+
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
@@ -281,12 +283,14 @@ static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp)
return 0;
}
-struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev,
+ const struct dpu_pingpong_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_pingpong *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -317,8 +321,3 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
return c;
}
-
-void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
-{
- kfree(pp);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 0d541ca5b056..a48b69fd79a3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -121,19 +121,15 @@ static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
/**
* dpu_hw_pingpong_init() - initializes the pingpong driver for the passed
* pingpong catalog entry.
+ * @dev: Corresponding device for devres management
* @cfg: Pingpong catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* @mdss_rev: dpu core's major and minor versions
* Return: Error code or allocated dpu_hw_pingpong context
*/
-struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
-
-/**
- * dpu_hw_pingpong_destroy - destroys pingpong driver context
- * should be called to free the context
- * @pp: Pointer to PP driver context returned by dpu_hw_pingpong_init
- */
-void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev,
+ const struct dpu_pingpong_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev);
#endif /*_DPU_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 8e3c65989c49..0bf8a83e8df3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -11,6 +11,7 @@
#include "msm_mdss.h"
#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
@@ -395,15 +396,6 @@ static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_sspp *ctx,
format);
}
-static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_sspp *ctx)
-{
- if (!ctx)
- return 0;
-
- return dpu_hw_get_scaler3_ver(&ctx->hw,
- ctx->cap->sblk->scaler_blk.base);
-}
-
/*
* dpu_hw_sspp_setup_rects()
*/
@@ -614,12 +606,8 @@ static void _setup_layer_ops(struct dpu_hw_sspp *c,
test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
- if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
- test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) ||
- test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
+ if (test_bit(DPU_SSPP_SCALER_QSEED3_COMPATIBLE, &features))
c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
- c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
- }
if (test_bit(DPU_SSPP_CDP, &features))
c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
@@ -654,10 +642,7 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
cfg->len,
kms);
- if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED4))
+ if (sblk->scaler_blk.len)
dpu_debugfs_create_regset32("scaler_blk", 0400,
debugfs_root,
sblk->scaler_blk.base + cfg->base,
@@ -685,16 +670,18 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
}
#endif
-struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
- void __iomem *addr, const struct msm_mdss_data *mdss_data,
- const struct dpu_mdss_version *mdss_rev)
+struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
+ const struct dpu_sspp_cfg *cfg,
+ void __iomem *addr,
+ const struct msm_mdss_data *mdss_data,
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_sspp *hw_pipe;
if (!addr)
return ERR_PTR(-EINVAL);
- hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+ hw_pipe = drmm_kzalloc(dev, sizeof(*hw_pipe), GFP_KERNEL);
if (!hw_pipe)
return ERR_PTR(-ENOMEM);
@@ -709,9 +696,3 @@ struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
return hw_pipe;
}
-
-void dpu_hw_sspp_destroy(struct dpu_hw_sspp *ctx)
-{
- kfree(ctx);
-}
-
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index f93969fddb22..b7dc52312c39 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -22,21 +22,6 @@ struct dpu_hw_sspp;
#define DPU_SSPP_SOLID_FILL BIT(4)
/**
- * Define all scaler feature bits in catalog
- */
-#define DPU_SSPP_SCALER (BIT(DPU_SSPP_SCALER_RGB) | \
- BIT(DPU_SSPP_SCALER_QSEED2) | \
- BIT(DPU_SSPP_SCALER_QSEED3) | \
- BIT(DPU_SSPP_SCALER_QSEED3LITE) | \
- BIT(DPU_SSPP_SCALER_QSEED4))
-
-/*
- * Define all CSC feature bits in catalog
- */
-#define DPU_SSPP_CSC_ANY (BIT(DPU_SSPP_CSC) | \
- BIT(DPU_SSPP_CSC_10BIT))
-
-/**
* Component indices
*/
enum {
@@ -297,12 +282,6 @@ struct dpu_hw_sspp_ops {
const struct dpu_format *format);
/**
- * get_scaler_ver - get scaler h/w version
- * @ctx: Pointer to pipe context
- */
- u32 (*get_scaler_ver)(struct dpu_hw_sspp *ctx);
-
- /**
* setup_cdp - setup client driven prefetch
* @pipe: Pointer to software pipe context
* @fmt: format used by the sw pipe
@@ -339,21 +318,17 @@ struct dpu_kms;
/**
* dpu_hw_sspp_init() - Initializes the sspp hw driver object.
* Should be called once before accessing every pipe.
+ * @dev: Corresponding device for devres management
* @cfg: Pipe catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* @mdss_data: UBWC / MDSS configuration data
* @mdss_rev: dpu core's major and minor versions
*/
-struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
- void __iomem *addr, const struct msm_mdss_data *mdss_data,
- const struct dpu_mdss_version *mdss_rev);
-
-/**
- * dpu_hw_sspp_destroy(): Destroys SSPP driver context
- * should be called during Hw pipe cleanup.
- * @ctx: Pointer to SSPP driver context returned by dpu_hw_sspp_init
- */
-void dpu_hw_sspp_destroy(struct dpu_hw_sspp *ctx);
+struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
+ const struct dpu_sspp_cfg *cfg,
+ void __iomem *addr,
+ const struct msm_mdss_data *mdss_data,
+ const struct dpu_mdss_version *mdss_rev);
int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
struct dentry *entry);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index 24e734768a72..05e48cf4ec1d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
+#include <drm/drm_managed.h>
+
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_top.h"
@@ -247,16 +249,17 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
-struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg,
- void __iomem *addr,
- const struct dpu_mdss_cfg *m)
+struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
+ const struct dpu_mdp_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_mdp *mdp;
if (!addr)
return ERR_PTR(-EINVAL);
- mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+ mdp = drmm_kzalloc(dev, sizeof(*mdp), GFP_KERNEL);
if (!mdp)
return ERR_PTR(-ENOMEM);
@@ -271,9 +274,3 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg,
return mdp;
}
-
-void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
-{
- kfree(mdp);
-}
-
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 8b1463d2b2f0..6f3dc98087df 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -145,13 +145,15 @@ struct dpu_hw_mdp {
/**
* dpu_hw_mdptop_init - initializes the top driver for the passed config
+ * @dev: Corresponding device for devres management
* @cfg: MDP TOP configuration from catalog
* @addr: Mapped register io address of MDP
* @m: Pointer to mdss catalog data
*/
-struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg,
- void __iomem *addr,
- const struct dpu_mdss_cfg *m);
+struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
+ const struct dpu_mdp_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index 18b16b2d2bf5..dd475827314e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -381,12 +381,6 @@ end:
DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
}
-u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
- u32 scaler_offset)
-{
- return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
-}
-
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
const struct dpu_csc_cfg *data, bool csc10)
@@ -481,9 +475,11 @@ void _dpu_hw_setup_qos_lut(struct dpu_hw_blk_reg_map *c, u32 offset,
cfg->danger_safe_en ? QOS_QOS_CTRL_DANGER_SAFE_EN : 0);
}
+/*
+ * note: Aside from encoders, input_sel should be set to 0x0 by default
+ */
void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
- u32 misr_ctrl_offset,
- bool enable, u32 frame_count)
+ u32 misr_ctrl_offset, u8 input_sel)
{
u32 config = 0;
@@ -492,15 +488,9 @@ void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
/* Clear old MISR value (in case it's read before a new value is calculated)*/
wmb();
- if (enable) {
- config = (frame_count & MISR_FRAME_COUNT_MASK) |
- MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK;
-
- DPU_REG_WRITE(c, misr_ctrl_offset, config);
- } else {
- DPU_REG_WRITE(c, misr_ctrl_offset, 0);
- }
-
+ config = MISR_FRAME_COUNT | MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK |
+ ((input_sel & 0xF) << 24);
+ DPU_REG_WRITE(c, misr_ctrl_offset, config);
}
int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
@@ -567,3 +557,47 @@ bool dpu_hw_clk_force_ctrl(struct dpu_hw_blk_reg_map *c,
return clk_forced_on;
}
+
+#define TO_S15D16(_x_)((_x_) << 7)
+
+const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+};
+
+const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+};
+
+const struct dpu_csc_cfg dpu_csc10_rgb2yuv_601l = {
+ {
+ TO_S15D16(0x0083), TO_S15D16(0x0102), TO_S15D16(0x0032),
+ TO_S15D16(0x1fb5), TO_S15D16(0x1f6c), TO_S15D16(0x00e1),
+ TO_S15D16(0x00e1), TO_S15D16(0x1f45), TO_S15D16(0x1fdc)
+ },
+ { 0x00, 0x00, 0x00 },
+ { 0x0040, 0x0200, 0x0200 },
+ { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+ { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
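
With the reworked dpu_hw_setup_misr() above, the MISR is always programmed enabled and free-running with a one-frame signature count; only the input select varies per caller (0x0 everywhere except the encoder paths, per the new comment). A sketch, using the masks from dpu_hw_util.h, of the word that lands in MISR_CTRL:

/* illustrative only: restates the config computed in dpu_hw_setup_misr() */
static u32 misr_ctrl_value(u8 input_sel)
{
	return MISR_FRAME_COUNT			/* 0x1: signature every frame */
	     | MISR_CTRL_ENABLE			/* BIT(8) */
	     | MISR_CTRL_FREE_RUN_MASK		/* BIT(31): keep sampling */
	     | ((input_sel & 0xF) << 24);	/* source select, bits 27:24 */
}

For the INTF path above (input_sel 0x1) this evaluates to 0x81000101; for the LM path (0x0), to 0x80000101.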
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 4bea139081bc..64ded69fa903 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
@@ -13,12 +13,18 @@
#include "dpu_hw_catalog.h"
#define REG_MASK(n) ((BIT(n)) - 1)
-#define MISR_FRAME_COUNT_MASK 0xFF
+#define MISR_FRAME_COUNT 0x1
#define MISR_CTRL_ENABLE BIT(8)
#define MISR_CTRL_STATUS BIT(9)
#define MISR_CTRL_STATUS_CLEAR BIT(10)
#define MISR_CTRL_FREE_RUN_MASK BIT(31)
+#define TO_S15D16(_x_)((_x_) << 7)
+
+extern const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L;
+extern const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L;
+extern const struct dpu_csc_cfg dpu_csc10_rgb2yuv_601l;
+
/*
* This is the common struct maintained by each sub block
* for mapping the register offsets in this block to the
@@ -340,9 +346,6 @@ void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
u32 scaler_offset, u32 scaler_version,
const struct dpu_format *format);
-u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
- u32 scaler_offset);
-
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
const struct dpu_csc_cfg *data, bool csc10);
@@ -358,9 +361,7 @@ void _dpu_hw_setup_qos_lut(struct dpu_hw_blk_reg_map *c, u32 offset,
const struct dpu_hw_qos_cfg *cfg);
void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
- u32 misr_ctrl_offset,
- bool enable,
- u32 frame_count);
+ u32 misr_ctrl_offset, u8 input_sel);
int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
u32 misr_ctrl_offset,
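
The TO_S15D16() macro, now exported alongside the tables, appears to widen s4.9 fixed-point coefficients to the S15.16 format the tables are annotated with (seven extra fractional bits); the patch does not say so explicitly, so treat this as an inference from the shift width. A worked value:

/* 1.0 in s4.9 is 0x0200 (9 fractional bits); seven more bits gives S15.16 */
_Static_assert(TO_S15D16(0x0200) == 0x00010000, "1.0 widens to 1.0");

Consistently, the literal 0x00012A00 in the YUV2RGB matrices is 76288/65536, roughly 1.164 in S15.16, the usual BT.601 limited-range luma expansion (255/219).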
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
index a5121a50b2bb..98e34afde2d2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
+#include <drm/drm_managed.h>
+
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_vbif.h"
@@ -211,12 +213,13 @@ static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
ops->set_write_gather_en = dpu_hw_set_write_gather_en;
}
-struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg,
- void __iomem *addr)
+struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev,
+ const struct dpu_vbif_cfg *cfg,
+ void __iomem *addr)
{
struct dpu_hw_vbif *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -234,8 +237,3 @@ struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg,
return c;
}
-
-void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
-{
- kfree(vbif);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
index 7e10d2a172b4..e2b4307500e4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -108,12 +108,12 @@ struct dpu_hw_vbif {
/**
* dpu_hw_vbif_init() - Initializes the VBIF driver for the passed
* VBIF catalog entry.
+ * @dev: Corresponding device for devres management
* @cfg: VBIF catalog entry for which driver object is required
* @addr: Mapped register io address of MDSS
*/
-struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg,
- void __iomem *addr);
-
-void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
+struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev,
+ const struct dpu_vbif_cfg *cfg,
+ void __iomem *addr);
#endif /*_DPU_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
index 9668fb97c047..e75995f7fcea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -3,6 +3,8 @@
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
*/
+#include <drm/drm_managed.h>
+
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
@@ -87,6 +89,9 @@ static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
dst_format |= BIT(14); /* DST_ALPHA_X */
}
+ if (DPU_FORMAT_IS_YUV(fmt))
+ dst_format |= BIT(15);
+
pattern = (fmt->element[3] << 24) |
(fmt->element[2] << 16) |
(fmt->element[1] << 8) |
@@ -208,15 +213,17 @@ static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
ops->setup_clk_force_ctrl = dpu_hw_wb_setup_clk_force_ctrl;
}
-struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
+struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev,
+ const struct dpu_wb_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_wb *c;
if (!addr)
return ERR_PTR(-EINVAL);
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -230,8 +237,3 @@ struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
return c;
}
-
-void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb)
-{
- kfree(hw_wb);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
index 88792f450a92..e671796ea379 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
@@ -76,18 +76,15 @@ struct dpu_hw_wb {
/**
* dpu_hw_wb_init() - Initializes the writeback hw driver object.
+ * @dev: Corresponding device for devres management
* @cfg: wb_path catalog entry for which driver object is required
* @addr: mapped register io address of MDP
* @mdss_rev: dpu core's major and minor versions
* Return: Error code or allocated dpu_hw_wb context
*/
-struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
-
-/**
- * dpu_hw_wb_destroy(): Destroy writeback hw driver object.
- * @hw_wb: Pointer to writeback hw driver object
- */
-void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb);
+struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev,
+ const struct dpu_wb_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev);
#endif /*_DPU_HW_WB_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index fe7267b3bff5..723cc1d82143 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -274,9 +274,6 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
void *p = dpu_hw_util_get_log_mask_ptr();
struct dentry *entry;
- struct drm_device *dev;
- struct msm_drm_private *priv;
- int i;
if (!p)
return -EINVAL;
@@ -285,9 +282,6 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
if (minor->type != DRM_MINOR_PRIMARY)
return 0;
- dev = dpu_kms->dev;
- priv = dev->dev_private;
-
entry = debugfs_create_dir("debug", minor->debugfs_root);
debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
@@ -297,11 +291,6 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
dpu_debugfs_core_irq_init(dpu_kms, entry);
dpu_debugfs_sspp_init(dpu_kms, entry);
- for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
- if (priv->dp[i])
- msm_dp_debugfs_init(priv->dp[i], minor);
- }
-
return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif
@@ -597,7 +586,6 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
- drm_encoder_cleanup(encoder);
return rc;
}
}
@@ -630,7 +618,6 @@ static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
rc = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
- drm_encoder_cleanup(encoder);
return rc;
}
@@ -662,7 +649,6 @@ static int _dpu_kms_initialize_writeback(struct drm_device *dev,
n_formats);
if (rc) {
DPU_ERROR("dpu_writeback_init, rc = %d\n", rc);
- drm_encoder_cleanup(encoder);
return rc;
}
@@ -806,30 +792,17 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
int i;
- if (dpu_kms->hw_intr)
- dpu_hw_intr_destroy(dpu_kms->hw_intr);
dpu_kms->hw_intr = NULL;
/* safe to call these more than once during shutdown */
_dpu_kms_mmu_destroy(dpu_kms);
- if (dpu_kms->catalog) {
- for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
- if (dpu_kms->hw_vbif[i]) {
- dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
- dpu_kms->hw_vbif[i] = NULL;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ dpu_kms->hw_vbif[i] = NULL;
}
- if (dpu_kms->rm_init)
- dpu_rm_destroy(&dpu_kms->rm);
- dpu_kms->rm_init = false;
-
dpu_kms->catalog = NULL;
- if (dpu_kms->hw_mdp)
- dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
dpu_kms->hw_mdp = NULL;
}
@@ -856,7 +829,6 @@ static int dpu_irq_postinstall(struct msm_kms *kms)
{
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
- int i;
if (!dpu_kms || !dpu_kms->dev)
return -EINVAL;
@@ -865,9 +837,6 @@ static int dpu_irq_postinstall(struct msm_kms *kms)
if (!priv)
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(priv->dp); i++)
- msm_dp_irq_postinstall(priv->dp[i]);
-
return 0;
}
@@ -975,6 +944,10 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
}
}
+ if (cat->cdm)
+ msm_disp_snapshot_add_block(disp_state, cat->cdm->len,
+ dpu_kms->mmio + cat->cdm->base, cat->cdm->name);
+
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
@@ -1078,7 +1051,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
if (!dpu_kms->catalog) {
DPU_ERROR("device config not known!\n");
rc = -EINVAL;
- goto power_error;
+ goto err_pm_put;
}
/*
@@ -1088,49 +1061,48 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
rc = _dpu_kms_mmu_init(dpu_kms);
if (rc) {
DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
- goto power_error;
+ goto err_pm_put;
}
dpu_kms->mdss = msm_mdss_get_mdss_data(dpu_kms->pdev->dev.parent);
if (IS_ERR(dpu_kms->mdss)) {
rc = PTR_ERR(dpu_kms->mdss);
DPU_ERROR("failed to get MDSS data: %d\n", rc);
- goto power_error;
+ goto err_pm_put;
}
if (!dpu_kms->mdss) {
rc = -EINVAL;
DPU_ERROR("NULL MDSS data\n");
- goto power_error;
+ goto err_pm_put;
}
- rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio);
+ rc = dpu_rm_init(dev, &dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio);
if (rc) {
DPU_ERROR("rm init failed: %d\n", rc);
- goto power_error;
+ goto err_pm_put;
}
- dpu_kms->rm_init = true;
-
- dpu_kms->hw_mdp = dpu_hw_mdptop_init(dpu_kms->catalog->mdp,
+ dpu_kms->hw_mdp = dpu_hw_mdptop_init(dev,
+ dpu_kms->catalog->mdp,
dpu_kms->mmio,
dpu_kms->catalog);
if (IS_ERR(dpu_kms->hw_mdp)) {
rc = PTR_ERR(dpu_kms->hw_mdp);
DPU_ERROR("failed to get hw_mdp: %d\n", rc);
dpu_kms->hw_mdp = NULL;
- goto power_error;
+ goto err_pm_put;
}
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
struct dpu_hw_vbif *hw;
const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
- hw = dpu_hw_vbif_init(vbif, dpu_kms->vbif[vbif->id]);
+ hw = dpu_hw_vbif_init(dev, vbif, dpu_kms->vbif[vbif->id]);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed to init vbif %d: %d\n", vbif->id, rc);
- goto power_error;
+ goto err_pm_put;
}
dpu_kms->hw_vbif[vbif->id] = hw;
@@ -1146,15 +1118,15 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
rc = dpu_core_perf_init(&dpu_kms->perf, dpu_kms->catalog->perf, max_core_clk_rate);
if (rc) {
DPU_ERROR("failed to init perf %d\n", rc);
- goto perf_err;
+ goto err_pm_put;
}
- dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
- if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+ dpu_kms->hw_intr = dpu_hw_intr_init(dev, dpu_kms->mmio, dpu_kms->catalog);
+ if (IS_ERR(dpu_kms->hw_intr)) {
rc = PTR_ERR(dpu_kms->hw_intr);
DPU_ERROR("hw_intr init failed: %d\n", rc);
dpu_kms->hw_intr = NULL;
- goto hw_intr_init_err;
+ goto err_pm_put;
}
dev->mode_config.min_width = 0;
@@ -1179,7 +1151,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
rc = _dpu_kms_drm_obj_init(dpu_kms);
if (rc) {
DPU_ERROR("modeset init failed: %d\n", rc);
- goto drm_obj_init_err;
+ goto err_pm_put;
}
dpu_vbif_init_memtypes(dpu_kms);
@@ -1188,10 +1160,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
return 0;
-drm_obj_init_err:
-hw_intr_init_err:
-perf_err:
-power_error:
+err_pm_put:
pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
_dpu_kms_hw_destroy(dpu_kms);
@@ -1349,6 +1318,7 @@ static const struct dev_pm_ops dpu_pm_ops = {
static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
+ { .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, },
{ .compatible = "qcom,sdm845-dpu", .data = &dpu_sdm845_cfg, },
{ .compatible = "qcom,sc7180-dpu", .data = &dpu_sc7180_cfg, },
{ .compatible = "qcom,sc7280-dpu", .data = &dpu_sc7280_cfg, },
@@ -1363,6 +1333,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sm8350-dpu", .data = &dpu_sm8350_cfg, },
{ .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, },
{ .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, },
+ { .compatible = "qcom,sm8650-dpu", .data = &dpu_sm8650_cfg, },
{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
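
Two knock-on effects of the devres conversion are visible in dpu_kms_hw_init() above: the per-stage error labels collapse into a single err_pm_put, since drmm-managed allocations need no manual unwinding, and the hw_intr check tightens from IS_ERR_OR_NULL() to IS_ERR() because the init functions return either a valid pointer or an ERR_PTR, never NULL. Condensed sketch of the resulting shape (step_one/step_two are placeholders, not real helpers):

static int hw_init_shape(struct dpu_kms *dpu_kms)
{
	int rc;

	rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
	if (rc < 0)
		return rc;

	rc = step_one(dpu_kms);		/* any drmm-backed init step */
	if (rc)
		goto err_pm_put;

	rc = step_two(dpu_kms);
	if (rc)
		goto err_pm_put;

	return 0;

err_pm_put:
	/* the only state left to undo is the runtime PM reference */
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
	return rc;
}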
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index b6f53ca6e962..d1207f4ec3ae 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -51,6 +51,7 @@
} while (0)
#define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+#define DPU_ERROR_RATELIMITED(fmt, ...) pr_err_ratelimited("[dpu error]" fmt, ##__VA_ARGS__)
/**
* ktime_compare_safe - compare two ktime structures
@@ -88,7 +89,6 @@ struct dpu_kms {
struct drm_private_obj global_state;
struct dpu_rm rm;
- bool rm_init;
struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
struct dpu_hw_mdp *hw_mdp;
@@ -136,6 +136,7 @@ struct dpu_global_state {
uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
+ uint32_t cdm_to_enc_id;
};
struct dpu_global_state
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 3eef5e025e12..ff975ad51145 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -21,6 +21,7 @@
#include "dpu_kms.h"
#include "dpu_formats.h"
#include "dpu_hw_sspp.h"
+#include "dpu_hw_util.h"
#include "dpu_trace.h"
#include "dpu_crtc.h"
#include "dpu_vbif.h"
@@ -78,8 +79,6 @@ static const uint32_t qcom_compressed_supported_formats[] = {
struct dpu_plane {
struct drm_plane base;
- struct mutex lock;
-
enum dpu_sspp pipe;
uint32_t color_fill;
@@ -470,8 +469,7 @@ static void _dpu_plane_setup_scaler3(struct dpu_hw_sspp *pipe_hw,
scale_cfg->src_height[i] /= chroma_subsmpl_v;
}
- if (pipe_hw->cap->features &
- BIT(DPU_SSPP_SCALER_QSEED4)) {
+ if (pipe_hw->cap->sblk->scaler_blk.version >= 0x3000) {
scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H;
scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V;
} else {
@@ -511,36 +509,6 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
}
}
-static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
- {
- /* S15.16 format */
- 0x00012A00, 0x00000000, 0x00019880,
- 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
- 0x00012A00, 0x00020480, 0x00000000,
- },
- /* signed bias */
- { 0xfff0, 0xff80, 0xff80,},
- { 0x0, 0x0, 0x0,},
- /* unsigned clamp */
- { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
- { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
-};
-
-static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
- {
- /* S15.16 format */
- 0x00012A00, 0x00000000, 0x00019880,
- 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
- 0x00012A00, 0x00020480, 0x00000000,
- },
- /* signed bias */
- { 0xffc0, 0xfe00, 0xfe00,},
- { 0x0, 0x0, 0x0,},
- /* unsigned clamp */
- { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
- { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
-};
-
static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe,
const struct dpu_format *fmt)
{
@@ -774,8 +742,8 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
if (DPU_FORMAT_IS_YUV(fmt) &&
- (!(pipe->sspp->cap->features & DPU_SSPP_SCALER) ||
- !(pipe->sspp->cap->features & DPU_SSPP_CSC_ANY))) {
+ (!pipe->sspp->cap->sblk->scaler_blk.len ||
+ !pipe->sspp->cap->sblk->csc_blk.len)) {
DPU_DEBUG_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
return -EINVAL;
@@ -824,6 +792,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
plane);
int ret = 0, min_scale;
struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+ u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate;
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
struct dpu_sw_pipe *pipe = &pstate->pipe;
struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
@@ -892,14 +862,16 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
max_linewidth = pdpu->catalog->caps->max_linewidth;
- if (drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
+ if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
+ _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) {
/*
* In parallel multirect case only the half of the usual width
* is supported for tiled formats. If we are here, we know that
* full width is more than max_linewidth, thus each rect is
* wider than allowed.
*/
- if (DPU_FORMAT_IS_UBWC(fmt)) {
+ if (DPU_FORMAT_IS_UBWC(fmt) &&
+ drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n",
DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
return -E2BIG;
@@ -1213,29 +1185,6 @@ static void dpu_plane_atomic_update(struct drm_plane *plane,
}
}
-static void dpu_plane_destroy(struct drm_plane *plane)
-{
- struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
- struct dpu_plane_state *pstate;
-
- DPU_DEBUG_PLANE(pdpu, "\n");
-
- if (pdpu) {
- pstate = to_dpu_plane_state(plane->state);
- _dpu_plane_set_qos_ctrl(plane, &pstate->pipe, false);
-
- if (pstate->r_pipe.sspp)
- _dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, false);
-
- mutex_destroy(&pdpu->lock);
-
- /* this will destroy the states as well */
- drm_plane_cleanup(plane);
-
- kfree(pdpu);
- }
-}
-
static void dpu_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -1405,7 +1354,6 @@ static bool dpu_plane_format_mod_supported(struct drm_plane *plane,
static const struct drm_plane_funcs dpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = dpu_plane_destroy,
.reset = dpu_plane_reset,
.atomic_duplicate_state = dpu_plane_duplicate_state,
.atomic_destroy_state = dpu_plane_destroy_state,
@@ -1433,35 +1381,28 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
struct dpu_hw_sspp *pipe_hw;
uint32_t num_formats;
uint32_t supported_rotations;
- int ret = -EINVAL;
-
- /* create and zero local structure */
- pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
- if (!pdpu) {
- DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
- ret = -ENOMEM;
- return ERR_PTR(ret);
- }
-
- /* cache local stuff for later */
- plane = &pdpu->base;
- pdpu->pipe = pipe;
+ int ret;
/* initialize underlying h/w driver */
pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe);
if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) {
DPU_ERROR("[%u]SSPP is invalid\n", pipe);
- goto clean_plane;
+ return ERR_PTR(-EINVAL);
}
format_list = pipe_hw->cap->sblk->format_list;
num_formats = pipe_hw->cap->sblk->num_formats;
- ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
+ pdpu = drmm_universal_plane_alloc(dev, struct dpu_plane, base,
+ 0xff, &dpu_plane_funcs,
format_list, num_formats,
supported_format_modifiers, type, NULL);
- if (ret)
- goto clean_plane;
+ if (IS_ERR(pdpu))
+ return ERR_CAST(pdpu);
+
+ /* cache local stuff for later */
+ plane = &pdpu->base;
+ pdpu->pipe = pipe;
pdpu->catalog = kms->catalog;
@@ -1488,13 +1429,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
/* success! finalize initialization */
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
- mutex_init(&pdpu->lock);
-
DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
pipe, plane->base.id);
return plane;
-
-clean_plane:
- kfree(pdpu);
- return ERR_PTR(ret);
}
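
Beyond the drmm_universal_plane_alloc() switch, the atomic check above widens the split-across-two-SSPPs trigger: a plane is now also split when one pipe cannot meet the required MDP core clock, not only when it exceeds max_linewidth (and the UBWC rejection fires only for the linewidth case). Condensed form of the new test, assuming _dpu_plane_calc_clk() returns the clock one rect demands for the adjusted mode:

bool need_split =
	drm_rect_width(&pipe_cfg->src_rect) > max_linewidth ||
	_dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) >
		max_mdp_clk_rate;	/* kms->perf.max_core_clk_rate */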
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 8759466e2f37..b58a9c2ae326 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -8,6 +8,7 @@
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
+#include "dpu_hw_cdm.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
@@ -34,72 +35,8 @@ struct dpu_rm_requirements {
struct msm_display_topology topology;
};
-int dpu_rm_destroy(struct dpu_rm *rm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
- struct dpu_hw_dspp *hw;
-
- if (rm->dspp_blks[i]) {
- hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
- dpu_hw_dspp_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
- struct dpu_hw_pingpong *hw;
-
- if (rm->pingpong_blks[i]) {
- hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
- dpu_hw_pingpong_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) {
- struct dpu_hw_merge_3d *hw;
-
- if (rm->merge_3d_blks[i]) {
- hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]);
- dpu_hw_merge_3d_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
- struct dpu_hw_mixer *hw;
-
- if (rm->mixer_blks[i]) {
- hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
- dpu_hw_lm_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
- struct dpu_hw_ctl *hw;
-
- if (rm->ctl_blks[i]) {
- hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
- dpu_hw_ctl_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++)
- dpu_hw_intf_destroy(rm->hw_intf[i]);
-
- for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) {
- struct dpu_hw_dsc *hw;
-
- if (rm->dsc_blks[i]) {
- hw = to_dpu_hw_dsc(rm->dsc_blks[i]);
- dpu_hw_dsc_destroy(hw);
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++)
- dpu_hw_wb_destroy(rm->hw_wb[i]);
-
- for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++)
- dpu_hw_sspp_destroy(rm->hw_sspp[i]);
-
- return 0;
-}
-
-int dpu_rm_init(struct dpu_rm *rm,
+int dpu_rm_init(struct drm_device *dev,
+ struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
const struct msm_mdss_data *mdss_data,
void __iomem *mmio)
@@ -119,7 +56,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_mixer *hw;
const struct dpu_lm_cfg *lm = &cat->mixer[i];
- hw = dpu_hw_lm_init(lm, mmio);
+ hw = dpu_hw_lm_init(dev, lm, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed lm object creation: err %d\n", rc);
@@ -132,7 +69,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_merge_3d *hw;
const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];
- hw = dpu_hw_merge_3d_init(merge_3d, mmio);
+ hw = dpu_hw_merge_3d_init(dev, merge_3d, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed merge_3d object creation: err %d\n",
@@ -146,7 +83,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_pingpong *hw;
const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
- hw = dpu_hw_pingpong_init(pp, mmio, cat->mdss_ver);
+ hw = dpu_hw_pingpong_init(dev, pp, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed pingpong object creation: err %d\n",
@@ -162,7 +99,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_intf *hw;
const struct dpu_intf_cfg *intf = &cat->intf[i];
- hw = dpu_hw_intf_init(intf, mmio, cat->mdss_ver);
+ hw = dpu_hw_intf_init(dev, intf, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed intf object creation: err %d\n", rc);
@@ -175,7 +112,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_wb *hw;
const struct dpu_wb_cfg *wb = &cat->wb[i];
- hw = dpu_hw_wb_init(wb, mmio, cat->mdss_ver);
+ hw = dpu_hw_wb_init(dev, wb, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed wb object creation: err %d\n", rc);
@@ -188,7 +125,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_ctl *hw;
const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
- hw = dpu_hw_ctl_init(ctl, mmio, cat->mixer_count, cat->mixer);
+ hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mixer_count, cat->mixer);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed ctl object creation: err %d\n", rc);
@@ -201,7 +138,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_dspp *hw;
const struct dpu_dspp_cfg *dspp = &cat->dspp[i];
- hw = dpu_hw_dspp_init(dspp, mmio);
+ hw = dpu_hw_dspp_init(dev, dspp, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed dspp object creation: err %d\n", rc);
@@ -215,9 +152,9 @@ int dpu_rm_init(struct dpu_rm *rm,
const struct dpu_dsc_cfg *dsc = &cat->dsc[i];
if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features))
- hw = dpu_hw_dsc_init_1_2(dsc, mmio);
+ hw = dpu_hw_dsc_init_1_2(dev, dsc, mmio);
else
- hw = dpu_hw_dsc_init(dsc, mmio);
+ hw = dpu_hw_dsc_init(dev, dsc, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
@@ -231,7 +168,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_sspp *hw;
const struct dpu_sspp_cfg *sspp = &cat->sspp[i];
- hw = dpu_hw_sspp_init(sspp, mmio, mdss_data, cat->mdss_ver);
+ hw = dpu_hw_sspp_init(dev, sspp, mmio, mdss_data, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed sspp object creation: err %d\n", rc);
@@ -240,11 +177,21 @@ int dpu_rm_init(struct dpu_rm *rm,
rm->hw_sspp[sspp->id - SSPP_NONE] = hw;
}
+ if (cat->cdm) {
+ struct dpu_hw_cdm *hw;
+
+ hw = dpu_hw_cdm_init(dev, cat->cdm, mmio, cat->mdss_ver);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed cdm object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->cdm_blk = &hw->base;
+ }
+
return 0;
fail:
- dpu_rm_destroy(rm);
-
return rc ? rc : -EFAULT;
}
@@ -488,6 +435,26 @@ static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
return 0;
}
+static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_encoder *enc)
+{
+ /* try allocating only one CDM block */
+ if (!rm->cdm_blk) {
+ DPU_ERROR("CDM block does not exist\n");
+ return -EIO;
+ }
+
+ if (global_state->cdm_to_enc_id) {
+ DPU_ERROR("CDM_0 is already allocated\n");
+ return -EIO;
+ }
+
+ global_state->cdm_to_enc_id = enc->base.id;
+
+ return 0;
+}
+
static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
@@ -513,6 +480,14 @@ static int _dpu_rm_make_reservation(
if (ret)
return ret;
+ if (reqs->topology.needs_cdm) {
+ ret = _dpu_rm_reserve_cdm(rm, global_state, enc);
+ if (ret) {
+ DPU_ERROR("unable to find CDM blk\n");
+ return ret;
+ }
+ }
+
return ret;
}
@@ -523,9 +498,9 @@ static int _dpu_rm_populate_requirements(
{
reqs->topology = req_topology;
- DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
+ DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d cdm: %d\n",
reqs->topology.num_lm, reqs->topology.num_dsc,
- reqs->topology.num_intf);
+ reqs->topology.num_intf, reqs->topology.needs_cdm);
return 0;
}
@@ -554,6 +529,7 @@ void dpu_rm_release(struct dpu_global_state *global_state,
ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id);
}
int dpu_rm_reserve(
@@ -627,6 +603,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
hw_to_enc_id = global_state->dsc_to_enc_id;
max_blks = ARRAY_SIZE(rm->dsc_blks);
break;
+ case DPU_HW_BLK_CDM:
+ hw_blks = &rm->cdm_blk;
+ hw_to_enc_id = &global_state->cdm_to_enc_id;
+ max_blks = 1;
+ break;
default:
DPU_ERROR("blk type %d not managed by rm\n", type);
return 0;
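
The RM treats CDM as a singleton: reservation is a claim on the single cdm_to_enc_id slot, and CDM_0 is presumably defined as 1 so that a zero in that slot can mean unreserved, mirroring the other mappings. A consumer (hypothetically, encoder code that sets topology.needs_cdm) would fetch the block back roughly like this; a sketch, not code from this patch, and to_dpu_hw_cdm() is assumed to come with the new dpu_hw_cdm.h:

struct dpu_hw_blk *hw_cdm = NULL;

dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
			      drm_enc->base.id, DPU_HW_BLK_CDM,
			      &hw_cdm, 1);
phys_enc->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;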
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 2b551566cbf4..e3f83ebc656b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -22,6 +22,7 @@ struct dpu_global_state;
* @hw_wb: array of wb hardware resources
* @dspp_blks: array of dspp hardware resources
* @hw_sspp: array of sspp hardware resources
+ * @cdm_blk: cdm hardware resource
*/
struct dpu_rm {
struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
@@ -33,30 +34,26 @@ struct dpu_rm {
struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0];
struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
struct dpu_hw_sspp *hw_sspp[SSPP_MAX - SSPP_NONE];
+ struct dpu_hw_blk *cdm_blk;
};
/**
* dpu_rm_init - Read hardware catalog and create reservation tracking objects
* for all HW blocks.
+ * @dev: Corresponding device for devres management
* @rm: DPU Resource Manager handle
* @cat: Pointer to hardware catalog
* @mdss_data: Pointer to MDSS / UBWC configuration
* @mmio: mapped register io address of MDP
* @Return: 0 on Success otherwise -ERROR
*/
-int dpu_rm_init(struct dpu_rm *rm,
+int dpu_rm_init(struct drm_device *dev,
+ struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
const struct msm_mdss_data *mdss_data,
void __iomem *mmio);
/**
- * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
- * @rm: DPU Resource Manager handle
- * @Return: 0 on Success otherwise -ERROR
- */
-int dpu_rm_destroy(struct dpu_rm *rm);
-
-/**
* dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
* the use connections and user requirements, specified through related
* topology control properties, and reserve hardware blocks to that
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 169f9de4a12a..75f93e346282 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -6,6 +6,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -123,16 +124,6 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
drm_gem_object_put(val);
}
-static void mdp4_crtc_destroy(struct drm_crtc *crtc)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
-
- kfree(mdp4_crtc);
-}
-
/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
[VG1] = 1,
@@ -269,6 +260,7 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ unsigned long flags;
DBG("%s", mdp4_crtc->name);
@@ -281,6 +273,14 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
mdp4_disable(mdp4_kms);
+ if (crtc->state->event && !crtc->state->active) {
+ WARN_ON(mdp4_crtc->event);
+ spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags);
+ }
+
mdp4_crtc->enabled = false;
}
@@ -475,7 +475,6 @@ static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
static const struct drm_crtc_funcs mdp4_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = mdp4_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.cursor_set = mdp4_crtc_cursor_set,
.cursor_move = mdp4_crtc_cursor_move,
@@ -616,6 +615,13 @@ static const char *dma_names[] = {
"DMA_P", "DMA_S", "DMA_E",
};
+static void mdp4_crtc_flip_cleanup(struct drm_device *dev, void *ptr)
+{
+ struct mdp4_crtc *mdp4_crtc = ptr;
+
+ drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
+}
+
/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id, int ovlp_id,
@@ -623,10 +629,13 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
{
struct drm_crtc *crtc = NULL;
struct mdp4_crtc *mdp4_crtc;
+ int ret;
- mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
- if (!mdp4_crtc)
- return ERR_PTR(-ENOMEM);
+ mdp4_crtc = drmm_crtc_alloc_with_planes(dev, struct mdp4_crtc, base,
+ plane, NULL,
+ &mdp4_crtc_funcs, NULL);
+ if (IS_ERR(mdp4_crtc))
+ return ERR_CAST(mdp4_crtc);
crtc = &mdp4_crtc->base;
@@ -648,9 +657,10 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
+ ret = drmm_add_action_or_reset(dev, mdp4_crtc_flip_cleanup, mdp4_crtc);
+ if (ret)
+ return ERR_PTR(ret);
- drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
- NULL);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
return crtc;
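
drmm_crtc_alloc_with_planes() covers the allocation and drm_crtc_cleanup(), but drm_flip_work_init() has no managed variant, so its teardown is registered as a devres action above. The _or_reset spelling matters here: if registering the action itself fails, the callback runs immediately, so the flip work cannot leak on that path:

ret = drmm_add_action_or_reset(dev, mdp4_crtc_flip_cleanup, mdp4_crtc);
if (ret)
	/* mdp4_crtc_flip_cleanup() has already run; the drmm-allocated
	 * CRTC itself is torn down by the core */
	return ERR_PTR(ret);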
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
index 39b8fe53c29d..74dafe7106be 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
@@ -26,18 +26,6 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
-
- drm_encoder_cleanup(encoder);
- kfree(mdp4_dsi_encoder);
-}
-
-static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = {
- .destroy = mdp4_dsi_encoder_destroy,
-};
-
static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -148,28 +136,18 @@ static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = {
/* initialize encoder */
struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
{
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *encoder;
struct mdp4_dsi_encoder *mdp4_dsi_encoder;
- int ret;
- mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL);
- if (!mdp4_dsi_encoder) {
- ret = -ENOMEM;
- goto fail;
- }
+ mdp4_dsi_encoder = drmm_encoder_alloc(dev, struct mdp4_dsi_encoder, base,
+ NULL, DRM_MODE_ENCODER_DSI, NULL);
+ if (IS_ERR(mdp4_dsi_encoder))
+ return ERR_CAST(mdp4_dsi_encoder);
encoder = &mdp4_dsi_encoder->base;
- drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs);
return encoder;
-
-fail:
- if (encoder)
- mdp4_dsi_encoder_destroy(encoder);
-
- return ERR_PTR(ret);
}
#endif /* CONFIG_DRM_MSM_DSI */
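
The same conversion repeats for all three MDP4 encoders in this patch (DSI above, DTV and LCDC below): drmm_encoder_alloc() replaces kzalloc() plus drm_encoder_init(), and the funcs table that existed only for .destroy is dropped, since a NULL funcs pointer is permitted for managed encoders. A hedged sketch, hypothetical names throughout:

    #include <drm/drm_encoder.h>
    #include <drm/drm_managed.h>
    #include <drm/drm_modeset_helper_vtables.h>
    #include <linux/err.h>

    struct my_encoder {
            struct drm_encoder base;
    };

    static const struct drm_encoder_helper_funcs my_encoder_helper_funcs = {
            /* .enable, .disable, .mode_set, ... */
    };

    struct drm_encoder *my_encoder_init(struct drm_device *dev)
    {
            struct my_encoder *me;

            /* managed allocation: freed, and drm_encoder_cleanup() called,
             * when the drm_device is released; NULL funcs is allowed */
            me = drmm_encoder_alloc(dev, struct my_encoder, base,
                                    NULL, DRM_MODE_ENCODER_DSI, NULL);
            if (IS_ERR(me))
                    return ERR_CAST(me);

            drm_encoder_helper_add(&me->base, &my_encoder_helper_funcs);

            return &me->base;
    }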
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
index 88645dbc3785..3b70764b48c4 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
@@ -25,17 +25,6 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(mdp4_dtv_encoder);
-}
-
-static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
- .destroy = mdp4_dtv_encoder_destroy,
-};
-
static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -173,41 +162,29 @@ long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
/* initialize encoder */
struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
{
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *encoder;
struct mdp4_dtv_encoder *mdp4_dtv_encoder;
- int ret;
- mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
- if (!mdp4_dtv_encoder) {
- ret = -ENOMEM;
- goto fail;
- }
+ mdp4_dtv_encoder = drmm_encoder_alloc(dev, struct mdp4_dtv_encoder, base,
+ NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (IS_ERR(mdp4_dtv_encoder))
+ return ERR_CAST(mdp4_dtv_encoder);
encoder = &mdp4_dtv_encoder->base;
- drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
- ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
- goto fail;
+ return ERR_CAST(mdp4_dtv_encoder->hdmi_clk);
}
mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
- ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
- goto fail;
+ return ERR_CAST(mdp4_dtv_encoder->mdp_clk);
}
return encoder;
-
-fail:
- if (encoder)
- mdp4_dtv_encoder_destroy(encoder);
-
- return ERR_PTR(ret);
}
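
Dropping the fail: label in the DTV hunk works because every resource in the new error paths is managed (drmm or devm); an error pointer from devm_clk_get() can be re-typed with ERR_CAST() and returned as-is. A small sketch under those assumptions, with hypothetical fields:

    #include <drm/drm_encoder.h>
    #include <linux/clk.h>
    #include <linux/err.h>

    struct my_encoder {
            struct drm_encoder base;
            struct clk *hdmi_clk;
    };

    struct drm_encoder *my_get_clock(struct drm_device *dev, struct my_encoder *me)
    {
            me->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
            if (IS_ERR(me->hdmi_clk))
                    /* ERR_CAST() re-types the error pointer, preserving
                     * the encoded errno across the return */
                    return ERR_CAST(me->hdmi_clk);

            return &me->base;
    }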
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 10eb3e5b218e..576995ddce37 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -18,7 +18,7 @@ struct mdp4_lcdc_encoder {
struct drm_panel *panel;
struct clk *lcdc_clk;
unsigned long int pixclock;
- struct regulator *regs[3];
+ struct regulator_bulk_data regs[3];
bool enabled;
uint32_t bsc;
};
@@ -30,18 +30,6 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
- to_mdp4_lcdc_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(mdp4_lcdc_encoder);
-}
-
-static const struct drm_encoder_funcs mdp4_lcdc_encoder_funcs = {
- .destroy = mdp4_lcdc_encoder_destroy,
-};
-
/* this should probably be a helper: */
static struct drm_connector *get_connector(struct drm_encoder *encoder)
{
@@ -271,12 +259,10 @@ static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder,
static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel;
- int i, ret;
if (WARN_ON(!mdp4_lcdc_encoder->enabled))
return;
@@ -301,11 +287,8 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk);
- for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
- ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
- if (ret)
- DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
- }
+ regulator_bulk_disable(ARRAY_SIZE(mdp4_lcdc_encoder->regs),
+ mdp4_lcdc_encoder->regs);
mdp4_lcdc_encoder->enabled = false;
}
@@ -319,7 +302,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel;
uint32_t config;
- int i, ret;
+ int ret;
if (WARN_ON(mdp4_lcdc_encoder->enabled))
return;
@@ -339,11 +322,10 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
mdp4_crtc_set_config(encoder->crtc, config);
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
- for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
- ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
- if (ret)
- DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
- }
+ ret = regulator_bulk_enable(ARRAY_SIZE(mdp4_lcdc_encoder->regs),
+ mdp4_lcdc_encoder->regs);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulators: %d\n", ret);
DBG("setting lcdc_clk=%lu", pc);
ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
@@ -383,63 +365,38 @@ long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
struct device_node *panel_node)
{
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *encoder;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
- struct regulator *reg;
int ret;
- mdp4_lcdc_encoder = kzalloc(sizeof(*mdp4_lcdc_encoder), GFP_KERNEL);
- if (!mdp4_lcdc_encoder) {
- ret = -ENOMEM;
- goto fail;
- }
+ mdp4_lcdc_encoder = drmm_encoder_alloc(dev, struct mdp4_lcdc_encoder, base,
+ NULL, DRM_MODE_ENCODER_LVDS, NULL);
+ if (IS_ERR(mdp4_lcdc_encoder))
+ return ERR_CAST(mdp4_lcdc_encoder);
mdp4_lcdc_encoder->panel_node = panel_node;
encoder = &mdp4_lcdc_encoder->base;
- drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
/* TODO: do we need different pll in other cases? */
mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
- ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
- goto fail;
+ return ERR_CAST(mdp4_lcdc_encoder->lcdc_clk);
}
/* TODO: different regulators in other cases? */
- reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
- goto fail;
- }
- mdp4_lcdc_encoder->regs[0] = reg;
-
- reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
- goto fail;
- }
- mdp4_lcdc_encoder->regs[1] = reg;
+ mdp4_lcdc_encoder->regs[0].supply = "lvds-vccs-3p3v";
+ mdp4_lcdc_encoder->regs[1].supply = "lvds-pll-vdda";
+ mdp4_lcdc_encoder->regs[2].supply = "lvds-vdda";
- reg = devm_regulator_get(dev->dev, "lvds-vdda");
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
- goto fail;
- }
- mdp4_lcdc_encoder->regs[2] = reg;
+ ret = devm_regulator_bulk_get(dev->dev,
+ ARRAY_SIZE(mdp4_lcdc_encoder->regs),
+ mdp4_lcdc_encoder->regs);
+ if (ret)
+ return ERR_PTR(ret);
return encoder;
-
-fail:
- if (encoder)
- mdp4_lcdc_encoder_destroy(encoder);
-
- return ERR_PTR(ret);
}
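
The LCDC hunk above replaces three devm_regulator_get() calls and two open-coded enable/disable loops with the bulk regulator API: the supplies are declared once in a regulator_bulk_data array, fetched with a single devm call, and switched together. Unlike the old loop, regulator_bulk_enable() also unwinds any already-enabled supplies when one of them fails. A hedged sketch with made-up supply names:

    #include <linux/device.h>
    #include <linux/regulator/consumer.h>

    static struct regulator_bulk_data my_regs[] = {
            { .supply = "vccs-3p3v" },
            { .supply = "pll-vdda" },
            { .supply = "vdda" },
    };

    int my_power_init(struct device *dev)
    {
            /* devm-managed: all three regulators are put automatically
             * when the device is unbound */
            return devm_regulator_bulk_get(dev, ARRAY_SIZE(my_regs), my_regs);
    }

    int my_power_on(void)
    {
            /* enables in array order; on failure, supplies that were
             * already enabled are disabled again before returning */
            return regulator_bulk_enable(ARRAY_SIZE(my_regs), my_regs);
    }

    void my_power_off(void)
    {
            regulator_bulk_disable(ARRAY_SIZE(my_regs), my_regs);
    }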
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 694d54341337..c5179e4c393c 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -1350,23 +1350,17 @@ int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
return cfg_handler->revision;
}
-void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
-{
- kfree(cfg_handler);
-}
-
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor)
{
struct drm_device *dev = mdp5_kms->dev;
struct mdp5_cfg_handler *cfg_handler;
const struct mdp5_cfg_handler *cfg_handlers;
- int i, ret = 0, num_handlers;
+ int i, num_handlers;
- cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
+ cfg_handler = devm_kzalloc(dev->dev, sizeof(*cfg_handler), GFP_KERNEL);
if (unlikely(!cfg_handler)) {
- ret = -ENOMEM;
- goto fail;
+ return ERR_PTR(-ENOMEM);
}
switch (major) {
@@ -1381,8 +1375,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
default:
DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
- ret = -ENXIO;
- goto fail;
+ return ERR_PTR(-ENXIO);
}
/* only after mdp5_cfg global pointer's init can we access the hw */
@@ -1396,8 +1389,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
if (unlikely(!mdp5_cfg)) {
DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
major, minor);
- ret = -ENXIO;
- goto fail;
+ return ERR_PTR(-ENXIO);
}
cfg_handler->revision = minor;
@@ -1406,10 +1398,4 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
DBG("MDP5: %s hw config selected", mdp5_cfg->name);
return cfg_handler;
-
-fail:
- if (cfg_handler)
- mdp5_cfg_destroy(cfg_handler);
-
- return ERR_PTR(ret);
}
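
The same lifetime change runs through the rest of the MDP5 hunks (ctl manager, mixer, pipe, SMP and intf below): kzalloc() plus a *_destroy() that only calls kfree() becomes devm_kzalloc(), after which error paths return ERR_PTR() directly and the destroy helpers and their prototypes are deleted. Sketch, assuming a hypothetical handler type:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    struct my_handler {
            int revision;
    };

    struct my_handler *my_handler_init(struct device *dev)
    {
            struct my_handler *h;

            /* freed automatically on device unbind, so no _destroy()
             * counterpart and no fail: label are needed */
            h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
            if (!h)
                    return ERR_PTR(-ENOMEM);

            return h;
    }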
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
index c2502cc33864..26c5d8b4ab46 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
@@ -121,6 +121,5 @@ int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor);
-void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
#endif /* __MDP5_CFG_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 86036dd4e1e8..4a3db2ea1689 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -13,6 +13,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -172,14 +173,11 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
drm_gem_object_put(val);
}
-static void mdp5_crtc_destroy(struct drm_crtc *crtc)
+static void mdp5_crtc_flip_cleanup(struct drm_device *dev, void *ptr)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc *mdp5_crtc = ptr;
- drm_crtc_cleanup(crtc);
drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);
-
- kfree(mdp5_crtc);
}
static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
@@ -1147,7 +1145,6 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = mdp5_crtc_reset,
.atomic_duplicate_state = mdp5_crtc_duplicate_state,
@@ -1161,7 +1158,6 @@ static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = mdp5_crtc_reset,
.atomic_duplicate_state = mdp5_crtc_duplicate_state,
@@ -1327,10 +1323,16 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
{
struct drm_crtc *crtc = NULL;
struct mdp5_crtc *mdp5_crtc;
+ int ret;
- mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
- if (!mdp5_crtc)
- return ERR_PTR(-ENOMEM);
+ mdp5_crtc = drmm_crtc_alloc_with_planes(dev, struct mdp5_crtc, base,
+ plane, cursor_plane,
+ cursor_plane ?
+ &mdp5_crtc_no_lm_cursor_funcs :
+ &mdp5_crtc_funcs,
+ NULL);
+ if (IS_ERR(mdp5_crtc))
+ return ERR_CAST(mdp5_crtc);
crtc = &mdp5_crtc->base;
@@ -1346,13 +1348,11 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
- drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
- cursor_plane ?
- &mdp5_crtc_no_lm_cursor_funcs :
- &mdp5_crtc_funcs, NULL);
-
drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
+ ret = drmm_add_action_or_reset(dev, mdp5_crtc_flip_cleanup, mdp5_crtc);
+ if (ret)
+ return ERR_PTR(ret);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 1220f2b20e05..666de99a46a5 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -681,11 +681,6 @@ void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
}
}
-void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
-{
- kfree(ctl_mgr);
-}
-
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
@@ -697,18 +692,16 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
unsigned long flags;
int c, ret;
- ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
+ ctl_mgr = devm_kzalloc(dev->dev, sizeof(*ctl_mgr), GFP_KERNEL);
if (!ctl_mgr) {
DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
- ret = -ENOMEM;
- goto fail;
+ return ERR_PTR(-ENOMEM);
}
if (WARN_ON(ctl_cfg->count > MAX_CTL)) {
DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
ctl_cfg->count);
- ret = -ENOSPC;
- goto fail;
+ return ERR_PTR(-ENOSPC);
}
/* initialize the CTL manager: */
@@ -727,7 +720,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
ret = -EINVAL;
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
- goto fail;
+ return ERR_PTR(ret);
}
ctl->ctlm = ctl_mgr;
ctl->id = c;
@@ -755,10 +748,4 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
return ctl_mgr;
-
-fail:
- if (ctl_mgr)
- mdp5_ctlm_destroy(ctl_mgr);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
index c2af68aa77ae..9020e8efc4e4 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
@@ -17,7 +17,6 @@ struct mdp5_ctl_manager;
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd);
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
-void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
/*
* CTL prototypes:
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
index 79d67c495780..8db97083e14d 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -16,17 +16,6 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
-static void mdp5_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(mdp5_encoder);
-}
-
-static const struct drm_encoder_funcs mdp5_encoder_funcs = {
- .destroy = mdp5_encoder_destroy,
-};
-
static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -342,13 +331,11 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
struct mdp5_encoder *mdp5_encoder;
int enc_type = (intf->type == INTF_DSI) ?
DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS;
- int ret;
- mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
- if (!mdp5_encoder) {
- ret = -ENOMEM;
- goto fail;
- }
+ mdp5_encoder = drmm_encoder_alloc(dev, struct mdp5_encoder, base,
+ NULL, enc_type, NULL);
+ if (IS_ERR(mdp5_encoder))
+ return ERR_CAST(mdp5_encoder);
encoder = &mdp5_encoder->base;
mdp5_encoder->ctl = ctl;
@@ -356,15 +343,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
spin_lock_init(&mdp5_encoder->intf_lock);
- drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL);
-
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
return encoder;
-
-fail:
- if (encoder)
- mdp5_encoder_destroy(encoder);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index ec933d597e20..0827634664ae 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -209,13 +209,6 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct msm_gem_address_space *aspace = kms->aspace;
- int i;
-
- for (i = 0; i < mdp5_kms->num_hwmixers; i++)
- mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);
-
- for (i = 0; i < mdp5_kms->num_hwpipes; i++)
- mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu);
@@ -623,18 +616,6 @@ fail:
static void mdp5_destroy(struct mdp5_kms *mdp5_kms)
{
- int i;
-
- if (mdp5_kms->ctlm)
- mdp5_ctlm_destroy(mdp5_kms->ctlm);
- if (mdp5_kms->smp)
- mdp5_smp_destroy(mdp5_kms->smp);
- if (mdp5_kms->cfg)
- mdp5_cfg_destroy(mdp5_kms->cfg);
-
- for (i = 0; i < mdp5_kms->num_intfs; i++)
- kfree(mdp5_kms->intfs[i]);
-
if (mdp5_kms->rpm_enabled)
pm_runtime_disable(&mdp5_kms->pdev->dev);
@@ -652,7 +633,7 @@ static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
for (i = 0; i < cnt; i++) {
struct mdp5_hw_pipe *hwpipe;
- hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
+ hwpipe = mdp5_pipe_init(dev, pipes[i], offsets[i], caps);
if (IS_ERR(hwpipe)) {
ret = PTR_ERR(hwpipe);
DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
@@ -724,7 +705,7 @@ static int hwmixer_init(struct mdp5_kms *mdp5_kms)
for (i = 0; i < hw_cfg->lm.count; i++) {
struct mdp5_hw_mixer *mixer;
- mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
+ mixer = mdp5_mixer_init(dev, &hw_cfg->lm.instances[i]);
if (IS_ERR(mixer)) {
ret = PTR_ERR(mixer);
DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
@@ -755,7 +736,7 @@ static int interface_init(struct mdp5_kms *mdp5_kms)
if (intf_types[i] == INTF_DISABLED)
continue;
- intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+ intf = devm_kzalloc(dev->dev, sizeof(*intf), GFP_KERNEL);
if (!intf) {
DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
index 2536def2a000..2822b533f807 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
@@ -140,20 +140,16 @@ int mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
return 0;
}
-void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
-{
- kfree(mixer);
-}
-
static const char * const mixer_names[] = {
"LM0", "LM1", "LM2", "LM3", "LM4", "LM5",
};
-struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm)
+struct mdp5_hw_mixer *mdp5_mixer_init(struct drm_device *dev,
+ const struct mdp5_lm_instance *lm)
{
struct mdp5_hw_mixer *mixer;
- mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
+ mixer = devm_kzalloc(dev->dev, sizeof(*mixer), GFP_KERNEL);
if (!mixer)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
index 545ee223b9d7..2bedd75835bc 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
@@ -25,8 +25,8 @@ struct mdp5_hw_mixer_state {
struct drm_crtc *hwmixer_to_crtc[8];
};
-struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm);
-void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
+struct mdp5_hw_mixer *mdp5_mixer_init(struct drm_device *dev,
+ const struct mdp5_lm_instance *lm);
int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
uint32_t caps, struct mdp5_hw_mixer **mixer,
struct mdp5_hw_mixer **r_mixer);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index e4b8a789835a..99b2c30b1d48 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -151,17 +151,13 @@ int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
return 0;
}
-void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
-{
- kfree(hwpipe);
-}
-
-struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+struct mdp5_hw_pipe *mdp5_pipe_init(struct drm_device *dev,
+ enum mdp5_pipe pipe,
uint32_t reg_offset, uint32_t caps)
{
struct mdp5_hw_pipe *hwpipe;
- hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL);
+ hwpipe = devm_kzalloc(dev->dev, sizeof(*hwpipe), GFP_KERNEL);
if (!hwpipe)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
index cca67938cab2..452138821f60 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
@@ -39,8 +39,8 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
struct mdp5_hw_pipe **r_hwpipe);
int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
-struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+struct mdp5_hw_pipe *mdp5_pipe_init(struct drm_device *dev,
+ enum mdp5_pipe pipe,
uint32_t reg_offset, uint32_t caps);
-void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe);
#endif /* __MDP5_PIPE_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index b68682c1b5bc..8b59562e29e2 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -370,23 +370,17 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
drm_modeset_unlock(&mdp5_kms->glob_state_lock);
}
-void mdp5_smp_destroy(struct mdp5_smp *smp)
-{
- kfree(smp);
-}
struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
{
+ struct drm_device *dev = mdp5_kms->dev;
struct mdp5_smp_state *state;
struct mdp5_global_state *global_state;
struct mdp5_smp *smp;
- int ret;
- smp = kzalloc(sizeof(*smp), GFP_KERNEL);
- if (unlikely(!smp)) {
- ret = -ENOMEM;
- goto fail;
- }
+ smp = devm_kzalloc(dev->dev, sizeof(*smp), GFP_KERNEL);
+ if (unlikely(!smp))
+ return ERR_PTR(-ENOMEM);
smp->dev = mdp5_kms->dev;
smp->blk_cnt = cfg->mmb_count;
@@ -400,9 +394,4 @@ struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_
memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
return smp;
-fail:
- if (smp)
- mdp5_smp_destroy(smp);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
index ba5618e136c3..d8b6a11413d9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
@@ -68,7 +68,6 @@ struct mdp5_smp;
struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms,
const struct mdp5_smp_block *cfg);
-void mdp5_smp_destroy(struct mdp5_smp *smp);
void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p);
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 8e3b677f35e6..03f4951c49f4 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -291,6 +291,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
return -EINVAL;
}
+ ret = pm_runtime_resume_and_get(dp_aux->dev);
+ if (ret)
+ return ret;
+
mutex_lock(&aux->mutex);
if (!aux->initted) {
ret = -EIO;
@@ -364,6 +368,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
exit:
mutex_unlock(&aux->mutex);
+ pm_runtime_put_sync(dp_aux->dev);
return ret;
}
@@ -474,7 +479,6 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux)
int dp_aux_register(struct drm_dp_aux *dp_aux)
{
- struct dp_aux_private *aux;
int ret;
if (!dp_aux) {
@@ -482,12 +486,7 @@ int dp_aux_register(struct drm_dp_aux *dp_aux)
return -EINVAL;
}
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
- aux->dp_aux.name = "dpu_dp_aux";
- aux->dp_aux.dev = aux->dev;
- aux->dp_aux.transfer = dp_aux_transfer;
- ret = drm_dp_aux_register(&aux->dp_aux);
+ ret = drm_dp_aux_register(dp_aux);
if (ret) {
DRM_ERROR("%s: failed to register drm aux: %d\n", __func__,
ret);
@@ -502,6 +501,21 @@ void dp_aux_unregister(struct drm_dp_aux *dp_aux)
drm_dp_aux_unregister(dp_aux);
}
+static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux,
+ unsigned long wait_us)
+{
+ int ret;
+ struct dp_aux_private *aux;
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ pm_runtime_get_sync(aux->dev);
+ ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
+ pm_runtime_put_sync(aux->dev);
+
+ return ret;
+}
+
struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
bool is_edp)
{
@@ -525,6 +539,17 @@ struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
aux->catalog = catalog;
aux->retry_cnt = 0;
+ /*
+ * Use drm_dp_aux_init() so the aux adapter is usable before AUX is
+ * registered with the DRM device, allowing the msm eDP panel to be
+ * detected by generic_edp_panel_probe().
+ */
+ aux->dp_aux.name = "dpu_dp_aux";
+ aux->dp_aux.dev = dev;
+ aux->dp_aux.transfer = dp_aux_transfer;
+ aux->dp_aux.wait_hpd_asserted = dp_wait_hpd_asserted;
+ drm_dp_aux_init(&aux->dp_aux);
+
return &aux->dp_aux;
}
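
The dp_aux changes make the AUX channel runtime-PM aware: each transfer brackets the hardware access with pm_runtime_resume_and_get()/pm_runtime_put_sync(), and the new .wait_hpd_asserted callback lets generic panel code power the block up just long enough to poll HPD, instead of relying on the host being initialized beforehand. A hedged sketch of the bracketing; my_hw_transfer() stands in for the real register sequence:

    #include <drm/display/drm_dp_helper.h>
    #include <linux/pm_runtime.h>

    static ssize_t my_hw_transfer(struct drm_dp_aux *aux,
                                  struct drm_dp_aux_msg *msg)
    {
            return msg->size; /* placeholder for the actual AUX FIFO I/O */
    }

    static ssize_t my_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
    {
            ssize_t ret;

            /* resume the hardware (or fail) before touching registers;
             * unlike pm_runtime_get_sync() this needs no put on failure */
            ret = pm_runtime_resume_and_get(aux->dev);
            if (ret)
                    return ret;

            ret = my_hw_transfer(aux, msg);

            pm_runtime_put_sync(aux->dev);
            return ret;
    }

Note that the driver's dp_wait_hpd_asserted() above ignores its wait_us argument and delegates the timeout to the catalog helper.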
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 3bba901afe33..6c281dc095b9 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -19,13 +19,9 @@
#define DEBUG_NAME "msm_dp"
struct dp_debug_private {
- struct dentry *root;
-
struct dp_link *link;
struct dp_panel *panel;
struct drm_connector *connector;
- struct device *dev;
- struct drm_device *drm_dev;
struct dp_debug dp_debug;
};
@@ -204,35 +200,33 @@ static const struct file_operations test_active_fops = {
.write = dp_test_active_write
};
-static void dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
+static void dp_debug_init(struct dp_debug *dp_debug, struct dentry *root, bool is_edp)
{
- char path[64];
struct dp_debug_private *debug = container_of(dp_debug,
struct dp_debug_private, dp_debug);
- snprintf(path, sizeof(path), "msm_dp-%s", debug->connector->name);
-
- debug->root = debugfs_create_dir(path, minor->debugfs_root);
-
- debugfs_create_file("dp_debug", 0444, debug->root,
+ debugfs_create_file("dp_debug", 0444, root,
debug, &dp_debug_fops);
- debugfs_create_file("msm_dp_test_active", 0444,
- debug->root,
- debug, &test_active_fops);
+ if (!is_edp) {
+ debugfs_create_file("msm_dp_test_active", 0444,
+ root,
+ debug, &test_active_fops);
- debugfs_create_file("msm_dp_test_data", 0444,
- debug->root,
- debug, &dp_test_data_fops);
+ debugfs_create_file("msm_dp_test_data", 0444,
+ root,
+ debug, &dp_test_data_fops);
- debugfs_create_file("msm_dp_test_type", 0444,
- debug->root,
- debug, &dp_test_type_fops);
+ debugfs_create_file("msm_dp_test_type", 0444,
+ root,
+ debug, &dp_test_type_fops);
+ }
}
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_link *link,
- struct drm_connector *connector, struct drm_minor *minor)
+ struct drm_connector *connector,
+ struct dentry *root, bool is_edp)
{
struct dp_debug_private *debug;
struct dp_debug *dp_debug;
@@ -253,46 +247,15 @@ struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
debug->dp_debug.debug_en = false;
debug->link = link;
debug->panel = panel;
- debug->dev = dev;
- debug->drm_dev = minor->dev;
- debug->connector = connector;
dp_debug = &debug->dp_debug;
dp_debug->vdisplay = 0;
dp_debug->hdisplay = 0;
dp_debug->vrefresh = 0;
- dp_debug_init(dp_debug, minor);
+ dp_debug_init(dp_debug, root, is_edp);
return dp_debug;
error:
return ERR_PTR(rc);
}
-
-static int dp_debug_deinit(struct dp_debug *dp_debug)
-{
- struct dp_debug_private *debug;
-
- if (!dp_debug)
- return -EINVAL;
-
- debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
-
- debugfs_remove_recursive(debug->root);
-
- return 0;
-}
-
-void dp_debug_put(struct dp_debug *dp_debug)
-{
- struct dp_debug_private *debug;
-
- if (!dp_debug)
- return;
-
- debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
-
- dp_debug_deinit(dp_debug);
-
- devm_kfree(debug->dev, debug);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index 124227873d58..9b3b2e702f65 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -34,7 +34,8 @@ struct dp_debug {
* @panel: instance of panel module
* @link: instance of link module
* @connector: double pointer to display connector
- * @minor: pointer to drm minor number after device registration
+ * @root: connector's debugfs root
+ * @is_edp: set for eDP connectors / panels
* return: pointer to allocated debug module data
*
* This function sets up the debug module and provides a way
@@ -43,31 +44,21 @@ struct dp_debug {
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_link *link,
struct drm_connector *connector,
- struct drm_minor *minor);
-
-/**
- * dp_debug_put()
- *
- * Cleans up dp_debug instance
- *
- * @dp_debug: instance of dp_debug
- */
-void dp_debug_put(struct dp_debug *dp_debug);
+ struct dentry *root,
+ bool is_edp);
#else
static inline
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_link *link,
- struct drm_connector *connector, struct drm_minor *minor)
+ struct drm_connector *connector,
+ struct dentry *root,
+ bool is_edp)
{
return ERR_PTR(-EINVAL);
}
-static inline void dp_debug_put(struct dp_debug *dp_debug)
-{
-}
-
#endif /* defined(CONFIG_DEBUG_FS) */
#endif /* _DP_DEBUG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 1b88fb52726f..d37d599aec27 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -49,13 +49,11 @@ enum {
ST_CONNECTED,
ST_DISCONNECT_PENDING,
ST_DISPLAY_OFF,
- ST_SUSPENDED,
};
enum {
EV_NO_EVENT,
/* hpd events */
- EV_HPD_INIT_SETUP,
EV_HPD_PLUG_INT,
EV_IRQ_HPD_INT,
EV_HPD_UNPLUG_INT,
@@ -170,6 +168,11 @@ static const struct msm_dp_desc sm8350_dp_descs[] = {
{}
};
+static const struct msm_dp_desc sm8650_dp_descs[] = {
+ { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ {}
+};
+
static const struct of_device_id dp_dt_match[] = {
{ .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_descs },
{ .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_descs },
@@ -180,6 +183,7 @@ static const struct of_device_id dp_dt_match[] = {
{ .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_edp_descs },
{ .compatible = "qcom,sdm845-dp", .data = &sc7180_dp_descs },
{ .compatible = "qcom,sm8350-dp", .data = &sm8350_dp_descs },
+ { .compatible = "qcom,sm8650-dp", .data = &sm8650_dp_descs },
{}
};
@@ -275,11 +279,6 @@ static int dp_display_bind(struct device *dev, struct device *master,
dp->dp_display.drm_dev = drm;
priv->dp[dp->id] = &dp->dp_display;
- rc = dp->parser->parse(dp->parser);
- if (rc) {
- DRM_ERROR("device tree parsing failed\n");
- goto end;
- }
dp->drm_dev = drm;
@@ -290,11 +289,6 @@ static int dp_display_bind(struct device *dev, struct device *master,
goto end;
}
- rc = dp_power_client_init(dp->power);
- if (rc) {
- DRM_ERROR("Power client create failed\n");
- goto end;
- }
rc = dp_register_audio_driver(dev, dp->audio);
if (rc) {
@@ -319,15 +313,10 @@ static void dp_display_unbind(struct device *dev, struct device *master,
struct dp_display_private *dp = dev_get_dp_display_private(dev);
struct msm_drm_private *priv = dev_get_drvdata(master);
- /* disable all HPD interrupts */
- if (dp->core_initialized)
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
-
kthread_stop(dp->ev_tsk);
of_dp_aux_depopulate_bus(dp->aux);
- dp_power_client_deinit(dp->power);
dp_unregister_audio_driver(dev, dp->audio);
dp_aux_unregister(dp->aux);
dp->drm_dev = NULL;
@@ -340,27 +329,10 @@ static const struct component_ops dp_display_comp_ops = {
.unbind = dp_display_unbind,
};
-static void dp_display_send_hpd_event(struct msm_dp *dp_display)
-{
- struct dp_display_private *dp;
- struct drm_connector *connector;
-
- dp = container_of(dp_display, struct dp_display_private, dp_display);
-
- connector = dp->dp_display.connector;
- drm_helper_hpd_irq_event(connector->dev);
-}
-
-
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
- if ((hpd && dp->dp_display.is_connected) ||
- (!hpd && !dp->dp_display.is_connected)) {
- drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
- (hpd ? "on" : "off"));
- return 0;
- }
+ struct drm_bridge *bridge = dp->dp_display.bridge;
/* reset video pattern flag on disconnect */
if (!hpd) {
@@ -372,11 +344,11 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
dp->panel->downstream_ports);
}
- dp->dp_display.is_connected = hpd;
+ dp->dp_display.link_ready = hpd;
drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
dp->dp_display.connector_type, hpd);
- dp_display_send_hpd_event(&dp->dp_display);
+ drm_bridge_hpd_notify(bridge, dp->dp_display.link_ready);
return 0;
}
@@ -575,6 +547,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
{
u32 state;
int ret;
+ struct platform_device *pdev = dp->dp_display.pdev;
mutex_lock(&dp->event_mutex);
@@ -582,7 +555,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
- if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
+ if (state == ST_DISPLAY_OFF) {
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -599,7 +572,14 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
return 0;
}
- ret = dp_display_usbpd_configure_cb(&dp->dp_display.pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret) {
+ DRM_ERROR("failed to pm_runtime_resume\n");
+ mutex_unlock(&dp->event_mutex);
+ return ret;
+ }
+
+ ret = dp_display_usbpd_configure_cb(&pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;
} else {
@@ -631,6 +611,7 @@ static void dp_display_handle_plugged_change(struct msm_dp *dp_display,
static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
{
u32 state;
+ struct platform_device *pdev = dp->dp_display.pdev;
mutex_lock(&dp->event_mutex);
@@ -681,6 +662,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp->dp_display.connector_type, state);
/* uevent will complete disconnection part */
+ pm_runtime_put_sync(&pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -696,7 +678,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
- if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
+ if (state == ST_DISPLAY_OFF) {
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -720,7 +702,6 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
{
- dp_debug_put(dp->debug);
dp_audio_put(dp->audio);
dp_panel_put(dp->panel);
dp_aux_put(dp->aux);
@@ -918,7 +899,7 @@ int dp_display_set_plugged_cb(struct msm_dp *dp_display,
dp_display->plugged_cb = fn;
dp_display->codec_dev = codec_dev;
- plugged = dp_display->is_connected;
+ plugged = dp_display->link_ready;
dp_display_handle_plugged_change(dp_display, plugged);
return 0;
@@ -1108,9 +1089,6 @@ static int hpd_event_thread(void *data)
spin_unlock_irqrestore(&dp_priv->event_lock, flag);
switch (todo->event_id) {
- case EV_HPD_INIT_SETUP:
- dp_display_host_init(dp_priv);
- break;
case EV_HPD_PLUG_INT:
dp_hpd_plug_handle(dp_priv, todo->data);
break;
@@ -1189,27 +1167,21 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
return ret;
}
-int dp_display_request_irq(struct msm_dp *dp_display)
+static int dp_display_request_irq(struct dp_display_private *dp)
{
int rc = 0;
- struct dp_display_private *dp;
-
- if (!dp_display) {
- DRM_ERROR("invalid input\n");
- return -EINVAL;
- }
+ struct platform_device *pdev = dp->dp_display.pdev;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
-
- dp->irq = irq_of_parse_and_map(dp->dp_display.pdev->dev.of_node, 0);
- if (!dp->irq) {
+ dp->irq = platform_get_irq(pdev, 0);
+ if (dp->irq < 0) {
DRM_ERROR("failed to get irq\n");
- return -EINVAL;
+ return dp->irq;
}
- rc = devm_request_irq(dp_display->drm_dev->dev, dp->irq,
- dp_display_irq_handler,
- IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
+ rc = devm_request_irq(&pdev->dev, dp->irq, dp_display_irq_handler,
+ IRQF_TRIGGER_HIGH|IRQF_NO_AUTOEN,
+ "dp_display_isr", dp);
+
if (rc < 0) {
DRM_ERROR("failed to request IRQ%u: %d\n",
dp->irq, rc);
@@ -1238,6 +1210,29 @@ static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pde
return NULL;
}
+static int dp_display_get_next_bridge(struct msm_dp *dp);
+
+static int dp_display_probe_tail(struct device *dev)
+{
+ struct msm_dp *dp = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dp_display_get_next_bridge(dp);
+ if (ret)
+ return ret;
+
+ ret = component_add(dev, &dp_display_comp_ops);
+ if (ret)
+ DRM_ERROR("component add failed, rc=%d\n", ret);
+
+ return ret;
+}
+
+static int dp_auxbus_done_probe(struct drm_dp_aux *aux)
+{
+ return dp_display_probe_tail(aux->dev);
+}
+
static int dp_display_probe(struct platform_device *pdev)
{
int rc = 0;
@@ -1271,6 +1266,18 @@ static int dp_display_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
+ rc = dp->parser->parse(dp->parser);
+ if (rc) {
+ DRM_ERROR("device tree parsing failed\n");
+ goto err;
+ }
+
+ rc = dp_power_client_init(dp->power);
+ if (rc) {
+ DRM_ERROR("Power client create failed\n");
+ goto err;
+ }
+
/* setup event q */
mutex_init(&dp->event_mutex);
init_waitqueue_head(&dp->event_q);
@@ -1283,13 +1290,31 @@ static int dp_display_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &dp->dp_display);
- rc = component_add(&pdev->dev, &dp_display_comp_ops);
- if (rc) {
- DRM_ERROR("component add failed, rc=%d\n", rc);
- dp_display_deinit_sub_modules(dp);
+ rc = devm_pm_runtime_enable(&pdev->dev);
+ if (rc)
+ goto err;
+
+ rc = dp_display_request_irq(dp);
+ if (rc)
+ goto err;
+
+ if (dp->dp_display.is_edp) {
+ rc = devm_of_dp_aux_populate_bus(dp->aux, dp_auxbus_done_probe);
+ if (rc) {
+ DRM_ERROR("eDP auxbus population failed, rc=%d\n", rc);
+ goto err;
+ }
+ } else {
+ rc = dp_display_probe_tail(&pdev->dev);
+ if (rc)
+ goto err;
}
return rc;
+
+err:
+ dp_display_deinit_sub_modules(dp);
+ return rc;
}
static void dp_display_remove(struct platform_device *pdev)
@@ -1298,113 +1323,50 @@ static void dp_display_remove(struct platform_device *pdev)
component_del(&pdev->dev, &dp_display_comp_ops);
dp_display_deinit_sub_modules(dp);
-
platform_set_drvdata(pdev, NULL);
}
-static int dp_pm_resume(struct device *dev)
+static int dp_pm_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct msm_dp *dp_display = platform_get_drvdata(pdev);
- struct dp_display_private *dp;
- int sink_count = 0;
-
- dp = container_of(dp_display, struct dp_display_private, dp_display);
-
- mutex_lock(&dp->event_mutex);
-
- drm_dbg_dp(dp->drm_dev,
- "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
- dp->phy_initialized, dp_display->power_on);
-
- /* start from disconnected state */
- dp->hpd_state = ST_DISCONNECTED;
-
- /* turn on dp ctrl/phy */
- dp_display_host_init(dp);
-
- if (dp_display->is_edp)
- dp_catalog_ctrl_hpd_enable(dp->catalog);
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
- if (dp_catalog_link_is_connected(dp->catalog)) {
- /*
- * set sink to normal operation mode -- D0
- * before dpcd read
- */
- dp_display_host_phy_init(dp);
- dp_link_psm_config(dp->link, &dp->panel->link_info, false);
- sink_count = drm_dp_read_sink_count(dp->aux);
- if (sink_count < 0)
- sink_count = 0;
+ disable_irq(dp->irq);
+ if (dp->dp_display.is_edp) {
dp_display_host_phy_exit(dp);
+ dp_catalog_ctrl_hpd_disable(dp->catalog);
}
-
- dp->link->sink_count = sink_count;
- /*
- * can not declared display is connected unless
- * HDMI cable is plugged in and sink_count of
- * dongle become 1
- * also only signal audio when disconnected
- */
- if (dp->link->sink_count) {
- dp->dp_display.is_connected = true;
- } else {
- dp->dp_display.is_connected = false;
- dp_display_handle_plugged_change(dp_display, false);
- }
-
- drm_dbg_dp(dp->drm_dev,
- "After, type=%d sink=%d conn=%d core_init=%d phy_init=%d power=%d\n",
- dp->dp_display.connector_type, dp->link->sink_count,
- dp->dp_display.is_connected, dp->core_initialized,
- dp->phy_initialized, dp_display->power_on);
-
- mutex_unlock(&dp->event_mutex);
+ dp_display_host_deinit(dp);
return 0;
}
-static int dp_pm_suspend(struct device *dev)
+static int dp_pm_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct msm_dp *dp_display = platform_get_drvdata(pdev);
- struct dp_display_private *dp;
-
- dp = container_of(dp_display, struct dp_display_private, dp_display);
-
- mutex_lock(&dp->event_mutex);
-
- drm_dbg_dp(dp->drm_dev,
- "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
- dp->phy_initialized, dp_display->power_on);
-
- /* mainlink enabled */
- if (dp_power_clk_status(dp->power, DP_CTRL_PM))
- dp_ctrl_off_link_stream(dp->ctrl);
-
- dp_display_host_phy_exit(dp);
-
- /* host_init will be called at pm_resume */
- dp_display_host_deinit(dp);
-
- dp->hpd_state = ST_SUSPENDED;
-
- drm_dbg_dp(dp->drm_dev,
- "After, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
- dp->phy_initialized, dp_display->power_on);
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
- mutex_unlock(&dp->event_mutex);
+ /*
+ * For eDP, the host controller, HPD block and PHY are enabled here,
+ * but with the HPD irq disabled.
+ *
+ * For DP, only the host controller is enabled here; the HPD block
+ * is enabled in dp_bridge_hpd_enable() and the PHY is enabled later,
+ * in the plug-in handler.
+ */
+ dp_display_host_init(dp);
+ if (dp->dp_display.is_edp) {
+ dp_catalog_ctrl_hpd_enable(dp->catalog);
+ dp_display_host_phy_init(dp);
+ }
+ enable_irq(dp->irq);
return 0;
}
static const struct dev_pm_ops dp_pm_ops = {
- .suspend = dp_pm_suspend,
- .resume = dp_pm_resume,
+ SET_RUNTIME_PM_OPS(dp_pm_runtime_suspend, dp_pm_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver dp_display_driver = {
@@ -1434,19 +1396,6 @@ void __exit msm_dp_unregister(void)
platform_driver_unregister(&dp_display_driver);
}
-void msm_dp_irq_postinstall(struct msm_dp *dp_display)
-{
- struct dp_display_private *dp;
-
- if (!dp_display)
- return;
-
- dp = container_of(dp_display, struct dp_display_private, dp_display);
-
- if (!dp_display->is_edp)
- dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 0);
-}
-
bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
{
struct dp_display_private *dp;
@@ -1456,7 +1405,7 @@ bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
return dp->wide_bus_en;
}
-void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
+void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *root, bool is_edp)
{
struct dp_display_private *dp;
struct device *dev;
@@ -1467,7 +1416,7 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
dp->debug = dp_debug_get(dev, dp->panel,
dp->link, dp->dp_display.connector,
- minor);
+ root, is_edp);
if (IS_ERR(dp->debug)) {
rc = PTR_ERR(dp->debug);
DRM_ERROR("failed to initialize debug, rc = %d\n", rc);
@@ -1479,33 +1428,8 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
{
int rc;
struct dp_display_private *dp_priv;
- struct device_node *aux_bus;
- struct device *dev;
dp_priv = container_of(dp, struct dp_display_private, dp_display);
- dev = &dp_priv->dp_display.pdev->dev;
- aux_bus = of_get_child_by_name(dev->of_node, "aux-bus");
-
- if (aux_bus && dp->is_edp) {
- dp_display_host_init(dp_priv);
- dp_catalog_ctrl_hpd_enable(dp_priv->catalog);
- dp_display_host_phy_init(dp_priv);
-
- /*
- * The code below assumes that the panel will finish probing
- * by the time devm_of_dp_aux_populate_ep_devices() returns.
- * This isn't a great assumption since it will fail if the
- * panel driver is probed asynchronously but is the best we
- * can do without a bigger driver reorganization.
- */
- rc = of_dp_aux_populate_bus(dp_priv->aux, NULL);
- of_node_put(aux_bus);
- if (rc)
- goto error;
- } else if (dp->is_edp) {
- DRM_ERROR("eDP aux_bus not found\n");
- return -ENODEV;
- }
/*
* External bridges are mandatory for eDP interfaces: one has to
@@ -1514,21 +1438,13 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
* For DisplayPort interfaces external bridges are optional, so
* silently ignore an error if one is not present (-ENODEV).
*/
- rc = devm_dp_parser_find_next_bridge(dp->drm_dev->dev, dp_priv->parser);
+ rc = devm_dp_parser_find_next_bridge(&dp->pdev->dev, dp_priv->parser);
if (!dp->is_edp && rc == -ENODEV)
return 0;
- if (!rc) {
+ if (!rc)
dp->next_bridge = dp_priv->parser->next_bridge;
- return 0;
- }
-error:
- if (dp->is_edp) {
- of_dp_aux_depopulate_bus(dp_priv->aux);
- dp_display_host_phy_exit(dp_priv);
- dp_display_host_deinit(dp_priv);
- }
return rc;
}
@@ -1542,16 +1458,6 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
dp_priv = container_of(dp_display, struct dp_display_private, dp_display);
- ret = dp_display_request_irq(dp_display);
- if (ret) {
- DRM_ERROR("request_irq failed, ret=%d\n", ret);
- return ret;
- }
-
- ret = dp_display_get_next_bridge(dp_display);
- if (ret)
- return ret;
-
ret = dp_bridge_init(dp_display, dev, encoder);
if (ret) {
DRM_DEV_ERROR(dev->dev,
@@ -1593,6 +1499,11 @@ void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
dp_hpd_plug_handle(dp_display, 0);
mutex_lock(&dp_display->event_mutex);
+ if (pm_runtime_resume_and_get(&dp->pdev->dev)) {
+ DRM_ERROR("failed to pm_runtime_resume\n");
+ mutex_unlock(&dp_display->event_mutex);
+ return;
+ }
state = dp_display->hpd_state;
if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) {
@@ -1657,10 +1568,9 @@ void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
mutex_lock(&dp_display->event_mutex);
state = dp_display->hpd_state;
- if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) {
- mutex_unlock(&dp_display->event_mutex);
- return;
- }
+ if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED)
+ drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n",
+ dp->connector_type, state);
dp_display_disable(dp_display);
@@ -1673,6 +1583,8 @@ void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
}
drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
+
+ pm_runtime_put_sync(&dp->pdev->dev);
mutex_unlock(&dp_display->event_mutex);
}
@@ -1711,7 +1623,21 @@ void dp_bridge_hpd_enable(struct drm_bridge *bridge)
struct msm_dp *dp_display = dp_bridge->dp_display;
struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+ /*
+ * This is the external-DP path with the HPD irq enabled:
+ * step-1: dp_pm_runtime_resume() enables the DP host only
+ * step-2: enable the HPD block and its irq here
+ * step-3: wait for the plug-in irq while the PHY is still uninitialized
+ * step-4: the DP PHY is initialized in the plug-in handler, before
+ *         link training
+ */
mutex_lock(&dp->event_mutex);
+ if (pm_runtime_resume_and_get(&dp_display->pdev->dev)) {
+ DRM_ERROR("failed to resume power\n");
+ mutex_unlock(&dp->event_mutex);
+ return;
+ }
+
dp_catalog_ctrl_hpd_enable(dp->catalog);
/* enable HDP interrupts */
@@ -1733,6 +1659,8 @@ void dp_bridge_hpd_disable(struct drm_bridge *bridge)
dp_catalog_ctrl_hpd_disable(dp->catalog);
dp_display->internal_hpd = false;
+
+ pm_runtime_put_sync(&dp_display->pdev->dev);
mutex_unlock(&dp->event_mutex);
}
@@ -1747,13 +1675,8 @@ void dp_bridge_hpd_notify(struct drm_bridge *bridge,
if (dp_display->internal_hpd)
return;
- if (!dp->core_initialized) {
- drm_dbg_dp(dp->drm_dev, "not initialized\n");
- return;
- }
-
- if (!dp_display->is_connected && status == connector_status_connected)
+ if (!dp_display->link_ready && status == connector_status_connected)
dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
- else if (dp_display->is_connected && status == connector_status_disconnected)
+ else if (dp_display->link_ready && status == connector_status_disconnected)
dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
}
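
Taken together, the dp_display changes replace the hand-rolled .suspend/.resume pair (and the ST_SUSPENDED state) with runtime PM: references are taken in the plug handler, in atomic_enable and in hpd_enable, dropped again on unplug, post_disable and hpd_disable, and system sleep is routed through the runtime callbacks. A hedged skeleton of that wiring, hypothetical callbacks only:

    #include <linux/platform_device.h>
    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int my_runtime_suspend(struct device *dev)
    {
            /* power down the controller; eDP also parks HPD and PHY */
            return 0;
    }

    static int my_runtime_resume(struct device *dev)
    {
            /* power up the controller; eDP also brings up HPD and PHY */
            return 0;
    }

    static const struct dev_pm_ops my_pm_ops = {
            SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
            /* reuse the runtime callbacks for system suspend/resume */
            SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                    pm_runtime_force_resume)
    };

    static int my_probe(struct platform_device *pdev)
    {
            /* managed enable: pm_runtime_disable() runs automatically
             * when the driver is removed */
            return devm_pm_runtime_enable(&pdev->dev);
    }

This reference counting is also why the IRQ is now requested with IRQF_NO_AUTOEN and only enabled from the runtime-resume path: the handler can never fire while the controller is powered down.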
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index f66cdbc35785..102f3507d824 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -17,7 +17,7 @@ struct msm_dp {
struct drm_bridge *bridge;
struct drm_connector *connector;
struct drm_bridge *next_bridge;
- bool is_connected;
+ bool link_ready;
bool audio_enabled;
bool power_on;
unsigned int connector_type;
@@ -36,11 +36,11 @@ struct msm_dp {
int dp_display_set_plugged_cb(struct msm_dp *dp_display,
hdmi_codec_plugged_cb fn, struct device *codec_dev);
int dp_display_get_modes(struct msm_dp *dp_display);
-int dp_display_request_irq(struct msm_dp *dp_display);
bool dp_display_check_video_test(struct msm_dp *dp_display);
int dp_display_get_test_bpp(struct msm_dp *dp_display);
void dp_display_signal_audio_start(struct msm_dp *dp_display);
void dp_display_signal_audio_complete(struct msm_dp *dp_display);
void dp_display_set_psr(struct msm_dp *dp, bool enter);
+void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *dentry, bool is_edp);
#endif /* _DP_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index e3bdd7dd4cdc..46e6889037e8 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -24,10 +24,10 @@ static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge)
dp = to_dp_bridge(bridge)->dp_display;
- drm_dbg_dp(dp->drm_dev, "is_connected = %s\n",
- (dp->is_connected) ? "true" : "false");
+ drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
+ (dp->link_ready) ? "true" : "false");
- return (dp->is_connected) ? connector_status_connected :
+ return (dp->link_ready) ? connector_status_connected :
connector_status_disconnected;
}
@@ -40,8 +40,8 @@ static int dp_bridge_atomic_check(struct drm_bridge *bridge,
dp = to_dp_bridge(bridge)->dp_display;
- drm_dbg_dp(dp->drm_dev, "is_connected = %s\n",
- (dp->is_connected) ? "true" : "false");
+ drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
+ (dp->link_ready) ? "true" : "false");
/*
* There is no protection in the DRM framework to check if the display
@@ -55,7 +55,7 @@ static int dp_bridge_atomic_check(struct drm_bridge *bridge,
* After that this piece of code can be removed.
*/
if (bridge->ops & DRM_BRIDGE_OP_HPD)
- return (dp->is_connected) ? 0 : -ENOTCONN;
+ return (dp->link_ready) ? 0 : -ENOTCONN;
return 0;
}
@@ -78,7 +78,7 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *
dp = to_dp_bridge(bridge)->dp_display;
/* pluggable case assumes EDID is read when HPD */
- if (dp->is_connected) {
+ if (dp->link_ready) {
rc = dp_display_get_modes(dp);
if (rc <= 0) {
DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc);
@@ -90,6 +90,13 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *
return rc;
}
+static void dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct msm_dp *dp = to_dp_bridge(bridge)->dp_display;
+
+ dp_display_debugfs_init(dp, root, false);
+}
+
static const struct drm_bridge_funcs dp_bridge_ops = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@@ -105,6 +112,7 @@ static const struct drm_bridge_funcs dp_bridge_ops = {
.hpd_enable = dp_bridge_hpd_enable,
.hpd_disable = dp_bridge_hpd_disable,
.hpd_notify = dp_bridge_hpd_notify,
+ .debugfs_init = dp_bridge_debugfs_init,
};
static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
@@ -260,6 +268,13 @@ static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
+static void edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct msm_dp *dp = to_dp_bridge(bridge)->dp_display;
+
+ dp_display_debugfs_init(dp, root, true);
+}
+
static const struct drm_bridge_funcs edp_bridge_ops = {
.atomic_enable = edp_bridge_atomic_enable,
.atomic_disable = edp_bridge_atomic_disable,
@@ -270,6 +285,7 @@ static const struct drm_bridge_funcs edp_bridge_ops = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_check = edp_bridge_atomic_check,
+ .debugfs_init = edp_bridge_debugfs_init,
};
int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
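
Moving dp_debug_get() under the bridge's .debugfs_init callback (one for DP, one for eDP) places the files beneath the connector's debugfs root, which the DRM core creates and tears down itself; this is what makes dp_debug_put() and the private dentry bookkeeping removable. A hedged sketch of the callback shape:

    #include <drm/drm_bridge.h>
    #include <linux/debugfs.h>
    #include <linux/fs.h>

    static const struct file_operations my_state_fops; /* hypothetical fops */

    static void my_bridge_debugfs_init(struct drm_bridge *bridge,
                                       struct dentry *root)
    {
            /* root belongs to the connector and is removed by the DRM
             * core, so no explicit teardown is required here */
            debugfs_create_file("my_state", 0444, root, bridge, &my_state_fops);
    }

    static const struct drm_bridge_funcs my_bridge_funcs = {
            .debugfs_init = my_bridge_debugfs_init,
            /* ... the usual atomic/detect/get_modes callbacks ... */
    };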
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
index 5cb84ca40e9e..c4843dd69f47 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.c
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -152,45 +152,17 @@ int dp_power_client_init(struct dp_power *dp_power)
power = container_of(dp_power, struct dp_power_private, dp_power);
- pm_runtime_enable(power->dev);
-
return dp_power_clk_init(power);
}
-void dp_power_client_deinit(struct dp_power *dp_power)
-{
- struct dp_power_private *power;
-
- power = container_of(dp_power, struct dp_power_private, dp_power);
-
- pm_runtime_disable(power->dev);
-}
-
int dp_power_init(struct dp_power *dp_power)
{
- int rc = 0;
- struct dp_power_private *power = NULL;
-
- power = container_of(dp_power, struct dp_power_private, dp_power);
-
- pm_runtime_get_sync(power->dev);
-
- rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
- if (rc)
- pm_runtime_put_sync(power->dev);
-
- return rc;
+ return dp_power_clk_enable(dp_power, DP_CORE_PM, true);
}
int dp_power_deinit(struct dp_power *dp_power)
{
- struct dp_power_private *power;
-
- power = container_of(dp_power, struct dp_power_private, dp_power);
-
- dp_power_clk_enable(dp_power, DP_CORE_PM, false);
- pm_runtime_put_sync(power->dev);
- return 0;
+ return dp_power_clk_enable(dp_power, DP_CORE_PM, false);
}
struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser)
diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h
index a3dec200785e..55ada51edb57 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.h
+++ b/drivers/gpu/drm/msm/dp/dp_power.h
@@ -81,17 +81,6 @@ int dp_power_clk_enable(struct dp_power *power, enum dp_pm_type pm_type,
int dp_power_client_init(struct dp_power *power);
/**
- * dp_power_clinet_deinit() - de-initialize clock and regulator modules
- *
- * @power: instance of power module
- * return: 0 for success, error for failure.
- *
- * This API will de-initialize the DisplayPort's clocks and regulator
- * modules.
- */
-void dp_power_client_deinit(struct dp_power *power);
-
-/**
* dp_power_get() - configure and get the DisplayPort power module data
*
* @parser: instance of parser module
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 1f98ff74ceb0..10ba7d153d1c 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -190,6 +190,21 @@ static const struct msm_dsi_config sm8550_dsi_cfg = {
},
};
+static const struct regulator_bulk_data sm8650_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 16600 }, /* 1.2 V */
+};
+
+static const struct msm_dsi_config sm8650_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = sm8650_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sm8650_dsi_regulators),
+ .bus_clk_names = dsi_v2_4_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names),
+ .io_start = {
+ { 0xae94000, 0xae96000 },
+ },
+};
+
static const struct regulator_bulk_data sc7280_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */
{ .supply = "refgen" },
@@ -281,6 +296,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_7_0,
&sm8550_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_8_0,
+ &sm8650_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 43f0dd74edb6..4c9b4b37681b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -28,6 +28,7 @@
#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
#define MSM_DSI_6G_VER_MINOR_V2_6_0 0x20060000
#define MSM_DSI_6G_VER_MINOR_V2_7_0 0x20070000
+#define MSM_DSI_6G_VER_MINOR_V2_8_0 0x20080000
#define MSM_DSI_V2_VER_MINOR_8064 0x0
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 05621e5e7d63..24a347fe2998 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -516,7 +516,9 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
struct device *dev = &phy->pdev->dev;
int ret;
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
ret = clk_prepare_enable(phy->ahb_clk);
if (ret) {
@@ -585,6 +587,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_5nm_8450_cfgs },
{ .compatible = "qcom,sm8550-dsi-phy-4nm",
.data = &dsi_phy_4nm_8550_cfgs },
+ { .compatible = "qcom,sm8650-dsi-phy-4nm",
+ .data = &dsi_phy_4nm_8650_cfgs },
#endif
{}
};
@@ -689,6 +693,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
"Unable to get ahb clk\n");
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
/* PLL init will call into clk_register which requires
* register access, so we need to enable power and ahb clock.
*/
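pm_runtime_get_sync() raises the device usage count even when the resume fails, so error paths historically needed a pm_runtime_put_noidle(); pm_runtime_resume_and_get() drops the count itself on failure, which is why a plain return on error is now safe. A minimal sketch of the resulting pattern; dsi_phy_example_resume() is an invented name:

static int dsi_phy_example_resume(struct device *dev, struct clk *ahb_clk)
{
	int ret;

	/* on failure the usage count has already been dropped for us */
	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	ret = clk_prepare_enable(ahb_clk);
	if (ret)
		pm_runtime_put_sync(dev);

	return ret;
}

devm_pm_runtime_enable() likewise pairs the enable with an automatic pm_runtime_disable() at driver detach, removing the need for a manual cleanup path in probe/remove.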
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 8b640d174785..e4275d3ad581 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -62,6 +62,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_zero;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 89a6344bc865..82d015aa2d63 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -1121,6 +1121,10 @@ static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 37550 },
};
+static const struct regulator_bulk_data dsi_phy_7nm_98000uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 98000 },
+};
+
static const struct regulator_bulk_data dsi_phy_7nm_97800uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 97800 },
};
@@ -1281,3 +1285,26 @@ const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs = {
.num_dsi_phy = 2,
.quirks = DSI_PHY_7NM_QUIRK_V5_2,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_98000uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_98000uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V5_2,
+};
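The CONFIG_64BIT guard exists because 5 GHz does not fit in a 32-bit unsigned long (ULONG_MAX is roughly 4.29 GHz there), so 32-bit builds leave the limit effectively uncapped rather than storing a silently truncated value. Illustrative only; dsi_pll_4nm_max_rate() is an invented helper:

#include <linux/limits.h>

static unsigned long dsi_pll_4nm_max_rate(void)
{
#ifdef CONFIG_64BIT
	return 5000000000UL;	/* fits: unsigned long is 64-bit */
#else
	return ULONG_MAX;	/* 5000000000 would truncate on 32-bit */
#endif
}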
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index de182c004843..7aa500d24240 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -249,7 +249,6 @@ struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi)
i2c->owner = THIS_MODULE;
- i2c->class = I2C_CLASS_DDC;
snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
i2c->dev.parent = &hdmi->pdev->dev;
i2c->algo = &msm_hdmi_i2c_algorithm;
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 04d304eed223..4494f6d1c7cb 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -304,36 +304,21 @@ int msm_debugfs_late_init(struct drm_device *dev)
return ret;
}
-void msm_debugfs_init(struct drm_minor *minor)
+static void msm_debugfs_gpu_init(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct msm_drm_private *priv = dev->dev_private;
struct dentry *gpu_devfreq;
- drm_debugfs_create_files(msm_debugfs_list,
- ARRAY_SIZE(msm_debugfs_list),
- minor->debugfs_root, minor);
-
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
- if (priv->kms) {
- drm_debugfs_create_files(msm_kms_debugfs_list,
- ARRAY_SIZE(msm_kms_debugfs_list),
- minor->debugfs_root, minor);
- debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
- dev, &msm_kms_fops);
- }
-
debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root,
&priv->hangcheck_period);
debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
&priv->disable_err_irq);
- debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
- dev, &shrink_fops);
-
gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root);
debugfs_create_bool("idle_clamp",0600, gpu_devfreq,
@@ -344,6 +329,30 @@ void msm_debugfs_init(struct drm_minor *minor)
debugfs_create_u32("downdifferential",0600, gpu_devfreq,
&priv->gpu_devfreq_config.downdifferential);
+}
+
+void msm_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ drm_debugfs_create_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (priv->gpu_pdev)
+ msm_debugfs_gpu_init(minor);
+
+ if (priv->kms) {
+ drm_debugfs_create_files(msm_kms_debugfs_list,
+ ARRAY_SIZE(msm_kms_debugfs_list),
+ minor->debugfs_root, minor);
+ debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
+ dev, &msm_kms_fops);
+ }
+
+ debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
+ dev, &shrink_fops);
if (priv->kms && priv->kms->funcs->debugfs_init)
priv->kms->funcs->debugfs_init(priv->kms, minor);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 3f217b578293..50b65ffc24b1 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -37,9 +37,10 @@
* - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
* - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
* - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
+ * - 1.12.0 - Add MSM_INFO_SET_METADATA and MSM_INFO_GET_METADATA
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 10
+#define MSM_VERSION_MINOR 12
#define MSM_VERSION_PATCHLEVEL 0
static void msm_deinit_vram(struct drm_device *ddev);
@@ -544,6 +545,85 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
return msm_gem_set_iova(obj, ctx->aspace, iova);
}
+static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
+ __user void *metadata,
+ u32 metadata_size)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ void *buf;
+ int ret;
+
+ /* Impose a moderate upper bound on metadata size: */
+ if (metadata_size > 128)
+ return -EOVERFLOW;
+
+ /* Use a temporary buf to keep copy_from_user() outside of gem obj lock: */
+ buf = memdup_user(metadata, metadata_size);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = msm_gem_lock_interruptible(obj);
+ if (ret)
+ goto out;
+
+ msm_obj->metadata =
+ krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL);
+ msm_obj->metadata_size = metadata_size;
+ memcpy(msm_obj->metadata, buf, metadata_size);
+
+ msm_gem_unlock(obj);
+
+out:
+ kfree(buf);
+
+ return ret;
+}
+
+static int msm_ioctl_gem_info_get_metadata(struct drm_gem_object *obj,
+ __user void *metadata,
+ u32 *metadata_size)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ void *buf;
+ int ret, len;
+
+ if (!metadata) {
+ /*
+ * Querying the size is inherently racy, but
+ * EXT_external_objects expects the app to confirm
+ * via device and driver UUIDs that the exporter and
+ * importer versions match. All we can do from the
+ * kernel side is check the length under obj lock
+ * when userspace tries to retrieve the metadata
+ */
+ *metadata_size = msm_obj->metadata_size;
+ return 0;
+ }
+
+ ret = msm_gem_lock_interruptible(obj);
+ if (ret)
+ return ret;
+
+ /* Avoid copy_to_user() under gem obj lock: */
+ len = msm_obj->metadata_size;
+ buf = kmemdup(msm_obj->metadata, len, GFP_KERNEL);
+
+ msm_gem_unlock(obj);
+
+ if (*metadata_size < len) {
+ ret = -ETOOSMALL;
+ } else if (copy_to_user(metadata, buf, len)) {
+ ret = -EFAULT;
+ } else {
+ *metadata_size = len;
+ }
+
+ kfree(buf);
+
+ return ret;
+}
+
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -566,6 +646,8 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
break;
case MSM_INFO_SET_NAME:
case MSM_INFO_GET_NAME:
+ case MSM_INFO_SET_METADATA:
+ case MSM_INFO_GET_METADATA:
break;
default:
return -EINVAL;
@@ -618,7 +700,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
break;
case MSM_INFO_GET_NAME:
if (args->value && (args->len < strlen(msm_obj->name))) {
- ret = -EINVAL;
+ ret = -ETOOSMALL;
break;
}
args->len = strlen(msm_obj->name);
@@ -628,6 +710,14 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
ret = -EFAULT;
}
break;
+ case MSM_INFO_SET_METADATA:
+ ret = msm_ioctl_gem_info_set_metadata(
+ obj, u64_to_user_ptr(args->value), args->len);
+ break;
+ case MSM_INFO_GET_METADATA:
+ ret = msm_ioctl_gem_info_get_metadata(
+ obj, u64_to_user_ptr(args->value), &args->len);
+ break;
}
drm_gem_object_put(obj);
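Both metadata helpers follow the same locking discipline: copy_from_user()/copy_to_user() can fault and sleep, so user copies are staged through temporary kernel buffers and only krealloc()/memcpy() run under the GEM object lock. A condensed sketch of the set-side pattern; set_blob() is an invented wrapper around the msm helpers shown above:

static int set_blob(struct drm_gem_object *obj, void __user *src, u32 size)
{
	void *buf;
	int ret;

	/* the fault on the user pointer happens here, outside the obj lock */
	buf = memdup_user(src, size);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ret = msm_gem_lock_interruptible(obj);
	if (!ret) {
		/* ... publish buf's contents into the object ... */
		msm_gem_unlock(obj);
	}

	kfree(buf);
	return ret;
}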
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index cd5bf658df66..16a7cbc0b7dd 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -78,12 +78,10 @@ enum msm_dsi_controller {
* enum msm_event_wait - type of HW events to wait for
* @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
* @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
- * @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters)
*/
enum msm_event_wait {
MSM_ENC_COMMIT_DONE = 0,
MSM_ENC_TX_COMPLETE,
- MSM_ENC_VBLANK,
};
/**
@@ -92,12 +90,14 @@ enum msm_event_wait {
* @num_intf: number of interfaces the panel is mounted on
* @num_dspp: number of dspp blocks used
* @num_dsc: number of Display Stream Compression (DSC) blocks used
+ * @needs_cdm: indicates whether cdm block is needed for this display topology
*/
struct msm_display_topology {
u32 num_lm;
u32 num_intf;
u32 num_dspp;
u32 num_dsc;
+ bool needs_cdm;
};
/* Commit/Event thread specific structure */
@@ -386,10 +386,8 @@ int __init msm_dp_register(void);
void __exit msm_dp_unregister(void);
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder);
-void msm_dp_irq_postinstall(struct msm_dp *dp_display);
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display);
-void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor);
bool msm_dp_wide_bus_available(const struct msm_dp *dp_display);
#else
@@ -407,19 +405,10 @@ static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
return -EINVAL;
}
-static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display)
-{
-}
-
static inline void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display)
{
}
-static inline void msm_dp_debugfs_init(struct msm_dp *dp_display,
- struct drm_minor *minor)
-{
-}
-
static inline bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
{
return false;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index db1e748daa75..175ee4ab8a6f 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -226,9 +226,9 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
msm_gem_assert_locked(obj);
- if (GEM_WARN_ON(msm_obj->madv > madv)) {
- DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
- msm_obj->madv, madv);
+ if (msm_obj->madv > madv) {
+ DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+ msm_obj->madv, madv);
return ERR_PTR(-EBUSY);
}
@@ -1058,6 +1058,7 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
drm_gem_object_release(obj);
+ kfree(msm_obj->metadata);
kfree(msm_obj);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8ddef5443140..8d414b072c29 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -9,6 +9,7 @@
#include <linux/kref.h>
#include <linux/dma-resv.h>
+#include "drm/drm_exec.h"
#include "drm/gpu_scheduler.h"
#include "msm_drv.h"
@@ -108,6 +109,10 @@ struct msm_gem_object {
char name[32]; /* Identifier to print for the debugfs files */
+ /* userspace metadata backchannel */
+ void *metadata;
+ u32 metadata_size;
+
/**
* pin_count: Number of times the pages are pinned
*
@@ -254,7 +259,7 @@ struct msm_gem_submit {
struct msm_gpu *gpu;
struct msm_gem_address_space *aspace;
struct list_head node; /* node in ring submit list */
- struct ww_acquire_ctx ticket;
+ struct drm_exec exec;
uint32_t seqno; /* Sequence number of the submit on the ring */
/* Hw fence, which is created when the scheduler executes the job, and
@@ -270,9 +275,9 @@ struct msm_gem_submit {
int fence_id; /* key into queue->fence_idr */
struct msm_gpu_submitqueue *queue;
struct pid *pid; /* submitting process */
- bool fault_dumped; /* Limit devcoredump dumping to one per submit */
- bool valid; /* true if no cmdstream patching needed */
- bool in_rb; /* "sudo" mode, copy cmds into RB */
+ bool bos_pinned : 1;
+ bool fault_dumped : 1; /* Limit devcoredump dumping to one per submit */
+ bool in_rb : 1; /* "sudo" mode, copy cmds into RB */
struct msm_ringbuffer *ring;
unsigned int nr_cmds;
unsigned int nr_bos;
@@ -287,10 +292,6 @@ struct msm_gem_submit {
struct drm_msm_gem_submit_reloc *relocs;
} *cmd; /* array of size nr_cmds */
struct {
-/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
-#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
-#define BO_LOCKED 0x4000 /* obj lock is held */
-#define BO_PINNED 0x2000 /* obj (pages) is pinned and on active list */
uint32_t flags;
union {
struct drm_gem_object *obj;
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 5a7d48c02c4b..07ca4ddfe4e3 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -75,7 +75,7 @@ static bool
wait_for_idle(struct drm_gem_object *obj)
{
enum dma_resv_usage usage = dma_resv_usage_rw(true);
- return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
+ return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
}
static bool
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index c002cabe7b9c..fba78193127d 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -17,6 +17,12 @@
#include "msm_gem.h"
#include "msm_gpu_trace.h"
+/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable
+ * error msgs for debugging, but we don't spam dmesg by default
+ */
+#define SUBMIT_ERROR(submit, fmt, ...) \
+ DRM_DEV_DEBUG_DRIVER((submit)->dev->dev, fmt, ##__VA_ARGS__)
+
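DRM_DEV_DEBUG_DRIVER() logs under the DRM_UT_DRIVER category, which is off by default and can be flipped on at runtime, so malformed submits remain diagnosable without spamming dmesg. For example:

/* silent by default; visible after booting with drm.debug=0x02, or:
 * echo 0x02 > /sys/module/drm/parameters/debug
 */
SUBMIT_ERROR(submit, "invalid flags: %x\n", submit_bo.flags);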
/*
* Cmdstream submission:
*/
@@ -37,7 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
if (sz > SIZE_MAX)
return ERR_PTR(-ENOMEM);
- submit = kzalloc(sz, GFP_KERNEL);
+ submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
if (!submit)
return ERR_PTR(-ENOMEM);
@@ -136,7 +142,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
!(submit_bo.flags & MANDATORY_FLAGS)) {
- DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
+ SUBMIT_ERROR(submit, "invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
i = 0;
goto out;
@@ -144,8 +150,6 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
submit->bos[i].handle = submit_bo.handle;
submit->bos[i].flags = submit_bo.flags;
- /* in validate_objects() we figure out if this is true: */
- submit->bos[i].iova = submit_bo.presumed;
}
spin_lock(&file->table_lock);
@@ -158,7 +162,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/
obj = idr_find(&file->object_idr, submit->bos[i].handle);
if (!obj) {
- DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
+ SUBMIT_ERROR(submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
ret = -EINVAL;
goto out_unlock;
}
@@ -202,13 +206,13 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
break;
default:
- DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
+ SUBMIT_ERROR(submit, "invalid type: %08x\n", submit_cmd.type);
return -EINVAL;
}
if (submit_cmd.size % 4) {
- DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
- submit_cmd.size);
+ SUBMIT_ERROR(submit, "non-aligned cmdstream buffer size: %u\n",
+ submit_cmd.size);
ret = -EINVAL;
goto out;
}
@@ -228,7 +232,7 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
ret = -ENOMEM;
goto out;
}
- submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
+ submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
if (!submit->cmd[i].relocs) {
ret = -ENOMEM;
goto out;
@@ -244,101 +248,30 @@ out:
return ret;
}
-/* Unwind bo state, according to cleanup_flags. In the success case, only
- * the lock is dropped at the end of the submit (and active/pin ref is dropped
- * later when the submit is retired).
- */
-static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
- unsigned cleanup_flags)
-{
- struct drm_gem_object *obj = submit->bos[i].obj;
- unsigned flags = submit->bos[i].flags & cleanup_flags;
-
- /*
- * Clear flags bit before dropping lock, so that the msm_job_run()
- * path isn't racing with submit_cleanup() (ie. the read/modify/
- * write is protected by the obj lock in all paths)
- */
- submit->bos[i].flags &= ~cleanup_flags;
-
- if (flags & BO_PINNED)
- msm_gem_unpin_locked(obj);
-
- if (flags & BO_LOCKED)
- dma_resv_unlock(obj->resv);
-}
-
-static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
-{
- unsigned cleanup_flags = BO_PINNED | BO_LOCKED;
- submit_cleanup_bo(submit, i, cleanup_flags);
-
- if (!(submit->bos[i].flags & BO_VALID))
- submit->bos[i].iova = 0;
-}
-
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
- int contended, slow_locked = -1, i, ret = 0;
-
-retry:
- for (i = 0; i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = submit->bos[i].obj;
-
- if (slow_locked == i)
- slow_locked = -1;
+ int ret;
- contended = i;
+ drm_exec_init(&submit->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, submit->nr_bos);
- if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = dma_resv_lock_interruptible(obj->resv,
- &submit->ticket);
+ drm_exec_until_all_locked (&submit->exec) {
+ for (unsigned i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+ ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
+ drm_exec_retry_on_contention(&submit->exec);
if (ret)
- goto fail;
- submit->bos[i].flags |= BO_LOCKED;
+ goto error;
}
}
- ww_acquire_done(&submit->ticket);
-
return 0;
-fail:
- if (ret == -EALREADY) {
- DRM_ERROR("handle %u at index %u already on submit list\n",
- submit->bos[i].handle, i);
- ret = -EINVAL;
- }
-
- for (; i >= 0; i--)
- submit_unlock_unpin_bo(submit, i);
-
- if (slow_locked > 0)
- submit_unlock_unpin_bo(submit, slow_locked);
-
- if (ret == -EDEADLK) {
- struct drm_gem_object *obj = submit->bos[contended].obj;
- /* we lost out in a seqno race, lock and retry.. */
- ret = dma_resv_lock_slow_interruptible(obj->resv,
- &submit->ticket);
- if (!ret) {
- submit->bos[contended].flags |= BO_LOCKED;
- slow_locked = contended;
- goto retry;
- }
-
- /* Not expecting -EALREADY here, if the bo was already
- * locked, we should have gotten -EALREADY already from
- * the dma_resv_lock_interruptable() call.
- */
- WARN_ON_ONCE(ret == -EALREADY);
- }
-
+error:
return ret;
}
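drm_exec wraps the ww-mutex acquire context: on contention, drm_exec_retry_on_contention() unwinds every reservation already taken and restarts the loop, replacing the hand-rolled retry/slow-lock unwinding deleted above. drm_exec_prepare_obj() also reserves the requested number of fence slots, which is why the dma_resv_reserve_fences() call disappears from submit_fence_sync() below. A minimal sketch of the idiom, with bos/nr_bos standing in for the submit arrays:

#include <drm/drm_exec.h>

static int lock_all(struct drm_gem_object **bos, unsigned int nr_bos)
{
	struct drm_exec exec;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, nr_bos);
	drm_exec_until_all_locked(&exec) {
		for (unsigned int i = 0; i < nr_bos; i++) {
			/* locks bos[i]->resv and reserves one fence slot */
			ret = drm_exec_prepare_obj(&exec, bos[i], 1);
			drm_exec_retry_on_contention(&exec);
			if (ret)
				goto out;
		}
	}
	/* every object is locked here: pin, attach fences, etc. */
out:
	drm_exec_fini(&exec);
	return ret;
}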
-static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
+static int submit_fence_sync(struct msm_gem_submit *submit)
{
int i, ret = 0;
@@ -346,22 +279,6 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
struct drm_gem_object *obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
- /* NOTE: _reserve_shared() must happen before
- * _add_shared_fence(), which makes this a slightly
- * strange place to call it. OTOH this is a
- * convenient can-fail point to hook it in.
- */
- ret = dma_resv_reserve_fences(obj->resv, 1);
- if (ret)
- return ret;
-
- /* If userspace has determined that explicit fencing is
- * used, it can disable implicit sync on the entire
- * submit:
- */
- if (no_implicit)
- continue;
-
/* Otherwise userspace can ask for implicit sync to be
* disabled on specific buffers. This is useful for internal
* usermode driver managed buffers, suballocation, etc.
@@ -384,8 +301,6 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
struct msm_drm_private *priv = submit->dev->dev_private;
int i, ret = 0;
- submit->valid = true;
-
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
struct msm_gem_vma *vma;
@@ -401,14 +316,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
if (ret)
break;
- if (vma->iova == submit->bos[i].iova) {
- submit->bos[i].flags |= BO_VALID;
- } else {
- submit->bos[i].iova = vma->iova;
- /* iova changed, so address in cmdstream is not valid: */
- submit->bos[i].flags &= ~BO_VALID;
- submit->valid = false;
- }
+ submit->bos[i].iova = vma->iova;
}
/*
@@ -421,13 +329,28 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
mutex_lock(&priv->lru.lock);
for (i = 0; i < submit->nr_bos; i++) {
msm_gem_pin_obj_locked(submit->bos[i].obj);
- submit->bos[i].flags |= BO_PINNED;
}
mutex_unlock(&priv->lru.lock);
+ submit->bos_pinned = true;
+
return ret;
}
+static void submit_unpin_objects(struct msm_gem_submit *submit)
+{
+ if (!submit->bos_pinned)
+ return;
+
+ for (int i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+
+ msm_gem_unpin_locked(obj);
+ }
+
+ submit->bos_pinned = false;
+}
+
static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
int i;
@@ -445,11 +368,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
- struct drm_gem_object **obj, uint64_t *iova, bool *valid)
+ struct drm_gem_object **obj, uint64_t *iova)
{
if (idx >= submit->nr_bos) {
- DRM_ERROR("invalid buffer index: %u (out of %u)\n",
- idx, submit->nr_bos);
+ SUBMIT_ERROR(submit, "invalid buffer index: %u (out of %u)\n",
+ idx, submit->nr_bos);
return -EINVAL;
}
@@ -457,8 +380,6 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
*obj = submit->bos[idx].obj;
if (iova)
*iova = submit->bos[idx].iova;
- if (valid)
- *valid = !!(submit->bos[idx].flags & BO_VALID);
return 0;
}
@@ -471,11 +392,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
uint32_t *ptr;
int ret = 0;
- if (!nr_relocs)
- return 0;
-
if (offset % 4) {
- DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
+ SUBMIT_ERROR(submit, "non-aligned cmdstream buffer: %u\n", offset);
return -EINVAL;
}
@@ -494,11 +412,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
uint32_t off;
uint64_t iova;
- bool valid;
if (submit_reloc.submit_offset % 4) {
- DRM_ERROR("non-aligned reloc offset: %u\n",
- submit_reloc.submit_offset);
+ SUBMIT_ERROR(submit, "non-aligned reloc offset: %u\n",
+ submit_reloc.submit_offset);
ret = -EINVAL;
goto out;
}
@@ -508,18 +425,15 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
if ((off >= (obj->size / 4)) ||
(off < last_offset)) {
- DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
+ SUBMIT_ERROR(submit, "invalid offset %u at reloc %u\n", off, i);
ret = -EINVAL;
goto out;
}
- ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
+ ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova);
if (ret)
goto out;
- if (valid)
- continue;
-
iova += submit_reloc.reloc_offset;
if (submit_reloc.shift < 0)
@@ -544,18 +458,14 @@ out:
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
- unsigned cleanup_flags = BO_LOCKED;
- unsigned i;
-
- if (error)
- cleanup_flags |= BO_PINNED;
-
- for (i = 0; i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = submit->bos[i].obj;
- submit_cleanup_bo(submit, i, cleanup_flags);
- if (error)
- drm_gem_object_put(obj);
+ if (error) {
+ submit_unpin_objects(submit);
+ /* job wasn't enqueued to scheduler, so early retirement: */
+ msm_submit_retire(submit);
}
+
+ if (submit->exec.objects)
+ drm_exec_fini(&submit->exec);
}
void msm_submit_retire(struct msm_gem_submit *submit)
@@ -749,7 +659,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_submit_post_dep *post_deps = NULL;
struct drm_syncobj **syncobjs_to_reset = NULL;
int out_fence_fd = -1;
- bool has_ww_ticket = false;
unsigned i;
int ret;
@@ -855,15 +764,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
/* copy_*_user while holding a ww ticket upsets lockdep */
- ww_acquire_init(&submit->ticket, &reservation_ww_class);
- has_ww_ticket = true;
ret = submit_lock_objects(submit);
if (ret)
goto out;
- ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
- if (ret)
- goto out;
+ if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
+ ret = submit_fence_sync(submit);
+ if (ret)
+ goto out;
+ }
ret = submit_pin_objects(submit);
if (ret)
@@ -873,32 +782,27 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
uint64_t iova;
- ret = submit_bo(submit, submit->cmd[i].idx,
- &obj, &iova, NULL);
+ ret = submit_bo(submit, submit->cmd[i].idx, &obj, &iova);
if (ret)
goto out;
if (!submit->cmd[i].size ||
((submit->cmd[i].size + submit->cmd[i].offset) >
obj->size / 4)) {
- DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
+ SUBMIT_ERROR(submit, "invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
ret = -EINVAL;
goto out;
}
submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);
- if (submit->valid)
+ if (likely(!submit->cmd[i].nr_relocs))
continue;
if (!gpu->allow_relocs) {
- if (submit->cmd[i].nr_relocs) {
- DRM_ERROR("relocs not allowed\n");
- ret = -EINVAL;
- goto out;
- }
-
- continue;
+ SUBMIT_ERROR(submit, "relocs not allowed\n");
+ ret = -EINVAL;
+ goto out;
}
ret = submit_reloc(submit, obj, submit->cmd[i].offset * 4,
@@ -974,6 +878,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
+ if (ret)
+ goto out;
+
submit_attach_object_fences(submit);
/* The scheduler owns a ref now: */
@@ -993,8 +900,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
submit_cleanup(submit, !!ret);
- if (has_ww_ticket)
- ww_acquire_fini(&submit->ticket);
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock:
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 7f64c6667300..095390774f22 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -292,8 +292,7 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
/* Set the active crash state to be dumped on failure */
gpu->crashstate = state;
- /* FIXME: Release the crashstate if this errors out? */
- dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
+ dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
@@ -366,29 +365,31 @@ static void recover_worker(struct kthread_work *work)
DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
- if (submit) {
- /* Increment the fault counts */
- submit->queue->faults++;
- if (submit->aspace)
- submit->aspace->faults++;
- get_comm_cmdline(submit, &comm, &cmd);
+ /*
+ * If the submit retired while we were waiting for the worker to run,
+ * or waiting to acquire the gpu lock, then nothing more to do.
+ */
+ if (!submit)
+ goto out_unlock;
- if (comm && cmd) {
- DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
- gpu->name, comm, cmd);
+ /* Increment the fault counts */
+ submit->queue->faults++;
+ if (submit->aspace)
+ submit->aspace->faults++;
- msm_rd_dump_submit(priv->hangrd, submit,
- "offending task: %s (%s)", comm, cmd);
- } else {
- msm_rd_dump_submit(priv->hangrd, submit, NULL);
- }
+ get_comm_cmdline(submit, &comm, &cmd);
+
+ if (comm && cmd) {
+ DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
+ gpu->name, comm, cmd);
+
+ msm_rd_dump_submit(priv->hangrd, submit,
+ "offending task: %s (%s)", comm, cmd);
} else {
- /*
- * We couldn't attribute this fault to any particular context,
- * so increment the global fault count instead.
- */
- gpu->global_faults++;
+ DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name);
+
+ msm_rd_dump_submit(priv->hangrd, submit, NULL);
}
/* Record the crash state */
@@ -441,6 +442,7 @@ static void recover_worker(struct kthread_work *work)
pm_runtime_put(&gpu->pdev->dev);
+out_unlock:
mutex_unlock(&gpu->lock);
msm_gpu_retire(gpu);
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 6865db1e3ce8..455b2e3a0cdd 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -28,6 +28,8 @@
#define MIN_IB_BW 400000000UL /* Min ib vote 400MB */
+#define DEFAULT_REG_BW 153600 /* Used in mdss fbdev driver */
+
struct msm_mdss {
struct device *dev;
@@ -40,8 +42,9 @@ struct msm_mdss {
struct irq_domain *domain;
} irq_controller;
const struct msm_mdss_data *mdss_data;
- struct icc_path *path[2];
- u32 num_paths;
+ struct icc_path *mdp_path[2];
+ u32 num_mdp_paths;
+ struct icc_path *reg_bus_path;
};
static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
@@ -49,38 +52,26 @@ static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
{
struct icc_path *path0;
struct icc_path *path1;
+ struct icc_path *reg_bus_path;
- path0 = of_icc_get(dev, "mdp0-mem");
+ path0 = devm_of_icc_get(dev, "mdp0-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
- msm_mdss->path[0] = path0;
- msm_mdss->num_paths = 1;
+ msm_mdss->mdp_path[0] = path0;
+ msm_mdss->num_mdp_paths = 1;
- path1 = of_icc_get(dev, "mdp1-mem");
+ path1 = devm_of_icc_get(dev, "mdp1-mem");
if (!IS_ERR_OR_NULL(path1)) {
- msm_mdss->path[1] = path1;
- msm_mdss->num_paths++;
+ msm_mdss->mdp_path[1] = path1;
+ msm_mdss->num_mdp_paths++;
}
- return 0;
-}
-
-static void msm_mdss_put_icc_path(void *data)
-{
- struct msm_mdss *msm_mdss = data;
- int i;
-
- for (i = 0; i < msm_mdss->num_paths; i++)
- icc_put(msm_mdss->path[i]);
-}
-
-static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
-{
- int i;
+ reg_bus_path = of_icc_get(dev, "cpu-cfg");
+ if (!IS_ERR_OR_NULL(reg_bus_path))
+ msm_mdss->reg_bus_path = reg_bus_path;
- for (i = 0; i < msm_mdss->num_paths; i++)
- icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
+ return 0;
}
static void msm_mdss_irq(struct irq_desc *desc)
@@ -236,14 +227,22 @@ const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev)
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
- int ret;
+ int ret, i;
/*
* Several components have AXI clocks that can only be turned on if
* the interconnect is enabled (non-zero bandwidth). Let's make sure
* that the interconnects are at least at a minimum amount.
*/
- msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);
+ for (i = 0; i < msm_mdss->num_mdp_paths; i++)
+ icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(MIN_IB_BW));
+
+ if (msm_mdss->mdss_data && msm_mdss->mdss_data->reg_bus_bw)
+ icc_set_bw(msm_mdss->reg_bus_path, 0,
+ msm_mdss->mdss_data->reg_bus_bw);
+ else
+ icc_set_bw(msm_mdss->reg_bus_path, 0,
+ DEFAULT_REG_BW);
ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
if (ret) {
@@ -295,8 +294,15 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
+ int i;
+
clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
- msm_mdss_icc_request_bw(msm_mdss, 0);
+
+ for (i = 0; i < msm_mdss->num_mdp_paths; i++)
+ icc_set_bw(msm_mdss->mdp_path[i], 0, 0);
+
+ if (msm_mdss->reg_bus_path)
+ icc_set_bw(msm_mdss->reg_bus_path, 0, 0);
return 0;
}
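icc_set_bw() takes average and peak bandwidth votes (the driver votes peak only, with average 0), and Bps_to_icc() converts bytes per second into the interconnect framework's kBps units. Switching to devm_of_icc_get() is what makes the msm_mdss_put_icc_path() cleanup action above removable. A hedged sketch of the vote lifecycle:

struct icc_path *path;

/* devm variant: icc_put() happens automatically at driver detach */
path = devm_of_icc_get(dev, "mdp0-mem");
if (IS_ERR(path))
	return PTR_ERR(path);

/* enable: vote a bandwidth floor so dependent AXI clocks can run */
icc_set_bw(path, 0, Bps_to_icc(MIN_IB_BW));

/* disable: drop the vote so the interconnect may power down */
icc_set_bw(path, 0, 0);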
@@ -384,6 +390,8 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
if (!msm_mdss)
return ERR_PTR(-ENOMEM);
+ msm_mdss->mdss_data = of_device_get_match_data(&pdev->dev);
+
msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
if (IS_ERR(msm_mdss->mmio))
return ERR_CAST(msm_mdss->mmio);
@@ -393,9 +401,6 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
if (ret)
return ERR_PTR(ret);
- ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
- if (ret)
- return ERR_PTR(ret);
if (is_mdp5)
ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
@@ -477,8 +482,6 @@ static int mdss_probe(struct platform_device *pdev)
if (IS_ERR(mdss))
return PTR_ERR(mdss);
- mdss->mdss_data = of_device_get_match_data(&pdev->dev);
-
platform_set_drvdata(pdev, mdss);
/*
@@ -510,11 +513,13 @@ static const struct msm_mdss_data msm8998_data = {
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_1_0,
.highest_bank_bit = 2,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data qcm2290_data = {
/* no UBWC */
.highest_bank_bit = 0x2,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sc7180_data = {
@@ -522,6 +527,7 @@ static const struct msm_mdss_data sc7180_data = {
.ubwc_dec_version = UBWC_2_0,
.ubwc_static = 0x1e,
.highest_bank_bit = 0x3,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sc7280_data = {
@@ -531,6 +537,7 @@ static const struct msm_mdss_data sc7280_data = {
.ubwc_static = 1,
.highest_bank_bit = 1,
.macrotile_mode = 1,
+ .reg_bus_bw = 74000,
};
static const struct msm_mdss_data sc8180x_data = {
@@ -538,6 +545,7 @@ static const struct msm_mdss_data sc8180x_data = {
.ubwc_dec_version = UBWC_3_0,
.highest_bank_bit = 3,
.macrotile_mode = 1,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sc8280xp_data = {
@@ -545,14 +553,22 @@ static const struct msm_mdss_data sc8280xp_data = {
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
.ubwc_static = 1,
- .highest_bank_bit = 2,
+ .highest_bank_bit = 3,
.macrotile_mode = 1,
+ .reg_bus_bw = 76800,
+};
+
+static const struct msm_mdss_data sdm670_data = {
+ .ubwc_enc_version = UBWC_2_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .highest_bank_bit = 1,
};
static const struct msm_mdss_data sdm845_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.highest_bank_bit = 2,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm6350_data = {
@@ -561,12 +577,14 @@ static const struct msm_mdss_data sm6350_data = {
.ubwc_swizzle = 6,
.ubwc_static = 0x1e,
.highest_bank_bit = 1,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm8150_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_3_0,
.highest_bank_bit = 2,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm6115_data = {
@@ -575,6 +593,7 @@ static const struct msm_mdss_data sm6115_data = {
.ubwc_swizzle = 7,
.ubwc_static = 0x11f,
.highest_bank_bit = 0x1,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm6125_data = {
@@ -592,6 +611,18 @@ static const struct msm_mdss_data sm8250_data = {
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
.macrotile_mode = 1,
+ .reg_bus_bw = 76800,
+};
+
+static const struct msm_mdss_data sm8350_data = {
+ .ubwc_enc_version = UBWC_4_0,
+ .ubwc_dec_version = UBWC_4_0,
+ .ubwc_swizzle = 6,
+ .ubwc_static = 1,
+ /* TODO: highest_bank_bit = 2 for LP_DDR4 */
+ .highest_bank_bit = 3,
+ .macrotile_mode = 1,
+ .reg_bus_bw = 74000,
};
static const struct msm_mdss_data sm8550_data = {
@@ -602,11 +633,13 @@ static const struct msm_mdss_data sm8550_data = {
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
.macrotile_mode = 1,
+ .reg_bus_bw = 57000,
};
static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
{ .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
+ { .compatible = "qcom,sdm670-mdss", .data = &sdm670_data },
{ .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
{ .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
{ .compatible = "qcom,sc7280-mdss", .data = &sc7280_data },
@@ -618,9 +651,10 @@ static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
{ .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
- { .compatible = "qcom,sm8350-mdss", .data = &sm8250_data },
- { .compatible = "qcom,sm8450-mdss", .data = &sm8250_data },
+ { .compatible = "qcom,sm8350-mdss", .data = &sm8350_data },
+ { .compatible = "qcom,sm8450-mdss", .data = &sm8350_data },
{ .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
+ { .compatible = "qcom,sm8650-mdss", .data = &sm8550_data},
{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);
diff --git a/drivers/gpu/drm/msm/msm_mdss.h b/drivers/gpu/drm/msm/msm_mdss.h
index 02bbab42adbc..3afef4b1786d 100644
--- a/drivers/gpu/drm/msm/msm_mdss.h
+++ b/drivers/gpu/drm/msm/msm_mdss.h
@@ -14,6 +14,7 @@ struct msm_mdss_data {
u32 ubwc_static;
u32 highest_bank_bit;
u32 macrotile_mode;
+ u32 reg_bus_bw;
};
#define UBWC_1_0 0x10000000
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 5adc51f7ab59..ca44fd291c5b 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -270,6 +270,9 @@ int msm_rd_debugfs_init(struct drm_minor *minor)
struct msm_rd_state *rd;
int ret;
+ if (!priv->gpu_pdev)
+ return 0;
+
/* only create on first minor: */
if (priv->rd)
return 0;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 4968568e3b54..4bc13f7d005a 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -29,9 +29,10 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
struct drm_gem_object *obj = submit->bos[i].obj;
msm_gem_unpin_active(obj);
- submit->bos[i].flags &= ~BO_PINNED;
}
+ submit->bos_pinned = false;
+
mutex_unlock(&priv->lru.lock);
msm_gpu_submit(gpu, submit);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 11fe75b68e95..8d37a694b772 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2476,7 +2476,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
err_cleanup:
if (ret)
- drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_helper_unprepare_planes(dev, state);
done:
pm_runtime_put_autosuspend(dev->dev);
return ret;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 2fa0445d8928..d1437c08645f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -187,7 +187,7 @@ struct nvkm_gsp {
void (*rpc_done)(struct nvkm_gsp *gsp, void *repv);
void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc);
- void *(*rm_ctrl_push)(struct nvkm_gsp_object *, void *argv, u32 repc);
+ int (*rm_ctrl_push)(struct nvkm_gsp_object *, void **argv, u32 repc);
void (*rm_ctrl_done)(struct nvkm_gsp_object *, void *repv);
void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc);
@@ -265,7 +265,7 @@ nvkm_gsp_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
return object->client->gsp->rm->rm_ctrl_get(object, cmd, argc);
}
-static inline void *
+static inline int
nvkm_gsp_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
{
return object->client->gsp->rm->rm_ctrl_push(object, argv, repc);
@@ -275,21 +275,24 @@ static inline void *
nvkm_gsp_rm_ctrl_rd(struct nvkm_gsp_object *object, u32 cmd, u32 repc)
{
void *argv = nvkm_gsp_rm_ctrl_get(object, cmd, repc);
+ int ret;
if (IS_ERR(argv))
return argv;
- return nvkm_gsp_rm_ctrl_push(object, argv, repc);
+ ret = nvkm_gsp_rm_ctrl_push(object, &argv, repc);
+ if (ret)
+ return ERR_PTR(ret);
+ return argv;
}
static inline int
nvkm_gsp_rm_ctrl_wr(struct nvkm_gsp_object *object, void *argv)
{
- void *repv = nvkm_gsp_rm_ctrl_push(object, argv, 0);
-
- if (IS_ERR(repv))
- return PTR_ERR(repv);
+ int ret = nvkm_gsp_rm_ctrl_push(object, &argv, 0);
+ if (ret)
+ return ret;
return 0;
}
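With rm_ctrl_push() returning an int and taking the message through a double pointer (GSP-RM may relocate the reply buffer), the caller still owns ctrl on failure and must release it. This is the call shape every converted site in r535.c below follows:

	ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	/* ... fill in the request fields of *ctrl ... */

	ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
	if (ret) {
		nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
		return ret;
	}

	/* ... read the reply from *ctrl, then release it ... */
	nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);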
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
index 5a2f273d95c8..0e32e71e123f 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
@@ -26,6 +26,49 @@
* DEALINGS IN THE SOFTWARE.
*/
+/**
+ * msgqTxHeader - TX queue data structure
+ * @version: the version of this structure, must be 0
+ * @size: the size of the entire queue, including this header
+ * @msgSize: the padded size of queue element, 16 is minimum
+ * @msgCount: the number of elements in this queue
+ * @writePtr: head index of this queue
+ * @flags: 1 = swap the RX pointers
+ * @rxHdrOff: offset of readPtr in this structure
+ * @entryOff: offset of beginning of queue (msgqRxHeader), relative to
+ * beginning of this structure
+ *
+ * The command queue is a queue of RPCs that are sent from the driver to the
+ * GSP. The status queue is a queue of messages/responses from GSP-RM to the
+ * driver. Although the driver allocates memory for both queues, the command
+ * queue is owned by the driver and the status queue is owned by GSP-RM. In
+ * addition, the headers of the two queues must not share the same 4K page.
+ *
+ * Each queue is prefixed with this data structure. The idea is that a queue
+ * and its header are written to only by their owner. That is, only the
+ * driver writes to the command queue and command queue header, and only the
+ * GSP writes to the status (receive) queue and its header.
+ *
+ * This is enforced by the concept of "swapping" the RX pointers. This is
+ * why the 'flags' field must be set to 1. 'rxHdrOff' is how the GSP knows
+ * where the tail pointer of its status queue is.
+ *
+ * When the driver writes a new RPC to the command queue, it updates writePtr.
+ * When it reads a new message from the status queue, it updates readPtr. In
+ * this way, the GSP knows when a new command is in the queue (it polls
+ * writePtr) and it knows how much free space is in the status queue (it
+ * checks readPtr). The driver never cares about how much free space is in
+ * the status queue.
+ *
+ * As usual, producers write to the head pointer, and consumers read from the
+ * tail pointer. When head == tail, the queue is empty.
+ *
+ * So to summarize:
+ * command.writePtr = head of command queue
+ * command.readPtr = tail of status queue
+ * status.writePtr = head of status queue
+ * status.readPtr = tail of command queue
+ */
typedef struct
{
NvU32 version; // queue version
@@ -38,6 +81,14 @@ typedef struct
NvU32 entryOff; // Offset of entries from start of backing store.
} msgqTxHeader;
+/**
+ * msgqRxHeader - RX queue data structure
+ * @readPtr: tail index of the other queue
+ *
+ * Although this is a separate struct, it could easily be merged into
+ * msgqTxHeader. msgqTxHeader.rxHdrOff is simply the offset of readPtr
+ * from the beginning of msgqTxHeader.
+ */
typedef struct
{
NvU32 readPtr; // message id of last message read
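Since head == tail means empty, one slot must always stay unused or a full queue would be indistinguishable from an empty one. A sketch of the occupancy arithmetic both sides rely on; the helper names are illustrative, not GSP-RM code:

/* elements currently queued (consumer side) */
static inline u32 msgq_used(u32 write_ptr, u32 read_ptr, u32 msg_count)
{
	return (write_ptr + msg_count - read_ptr) % msg_count;
}

/* free slots (producer side); one slot is sacrificed so that
 * head == tail can only ever mean "empty", never "full"
 */
static inline u32 msgq_free(u32 write_ptr, u32 read_ptr, u32 msg_count)
{
	return msg_count - 1 - msgq_used(write_ptr, read_ptr, msg_count);
}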
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
index 754c6af42f30..10121218f4d3 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
@@ -38,7 +38,7 @@ typedef struct PACKED_REGISTRY_TABLE
{
NvU32 size;
NvU32 numEntries;
- PACKED_REGISTRY_ENTRY entries[0];
+ PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
} PACKED_REGISTRY_TABLE;
#endif
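Annotating the flexible array with __counted_by() lets CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS range-check entries[] accesses against numEntries at runtime. A hedged allocation sketch with illustrative names:

#include <linux/overflow.h>
#include <linux/slab.h>

struct pkd_table {
	u32 size;
	u32 num;
	u32 entries[] __counted_by(num);
};

static struct pkd_table *pkd_table_alloc(u32 n)
{
	/* struct_size() does overflow-checked header + n * entry math */
	struct pkd_table *t = kzalloc(struct_size(t, entries, n), GFP_KERNEL);

	if (t)
		t->num = n;	/* set the counter before touching entries[] */
	return t;
}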
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b7dda486a7ea..00cc7d1abaa3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -325,8 +325,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
- if (pi < 0)
- pi = i;
+ /* pick the last one as it will be smallest. */
+ pi = i;
+
/* Stop once the buffer is larger than the current page size. */
if (*size >= 1ULL << vmm->page[i].shift)
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ca762ea55413..5057d976fa57 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -62,7 +62,7 @@ nouveau_fence_signal(struct nouveau_fence *fence)
if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- if (!--fctx->notify_ref)
+ if (atomic_dec_and_test(&fctx->notify_ref))
drop = 1;
}
@@ -103,6 +103,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
+ cancel_work_sync(&fctx->allow_block_work);
nouveau_fence_context_kill(fctx, 0);
nvif_event_dtor(&fctx->event);
fctx->dead = 1;
@@ -167,6 +168,18 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
return ret;
}
+static void
+nouveau_fence_work_allow_block(struct work_struct *work)
+{
+ struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
+ allow_block_work);
+
+ if (atomic_read(&fctx->notify_ref) == 0)
+ nvif_event_block(&fctx->event);
+ else
+ nvif_event_allow(&fctx->event);
+}
+
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
@@ -178,6 +191,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
} args;
int ret;
+ INIT_WORK(&fctx->allow_block_work, nouveau_fence_work_allow_block);
INIT_LIST_HEAD(&fctx->flip);
INIT_LIST_HEAD(&fctx->pending);
spin_lock_init(&fctx->lock);
@@ -521,15 +535,19 @@ static bool nouveau_fence_enable_signaling(struct dma_fence *f)
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
bool ret;
+ bool do_work = false;
- if (!fctx->notify_ref++)
- nvif_event_allow(&fctx->event);
+ if (atomic_inc_return(&fctx->notify_ref) == 0)
+ do_work = true;
ret = nouveau_fence_no_signaling(f);
if (ret)
set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
- else if (!--fctx->notify_ref)
- nvif_event_block(&fctx->event);
+ else if (atomic_dec_and_test(&fctx->notify_ref))
+ do_work = true;
+
+ if (do_work)
+ schedule_work(&fctx->allow_block_work);
return ret;
}
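enable_signaling runs under the fence spinlock, possibly in atomic context, while nvif_event_allow()/nvif_event_block() can sleep; hence the counter becomes an atomic_t and the actual allow/block decision is re-evaluated from process context. A condensed sketch of the deferral; struct fence_ctx and the event_*() stand-ins are illustrative, not the nouveau types:

#include <linux/atomic.h>
#include <linux/workqueue.h>

struct fence_ctx {
	atomic_t notify_ref;
	struct work_struct allow_block_work;
};

static void event_allow(struct fence_ctx *fctx) { /* may sleep */ }
static void event_block(struct fence_ctx *fctx) { /* may sleep */ }

static void allow_block_worker(struct work_struct *work)
{
	struct fence_ctx *fctx =
		container_of(work, struct fence_ctx, allow_block_work);

	/* re-read the live counter: a stale queued work item still
	 * converges on the correct allow/block state
	 */
	if (atomic_read(&fctx->notify_ref) == 0)
		event_block(fctx);
	else
		event_allow(fctx);
}

The cancel_work_sync() added to nouveau_fence_context_del() above ensures the queued worker cannot touch the context after teardown.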
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 64d33ae7f356..28f5cf013b89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -3,6 +3,7 @@
#define __NOUVEAU_FENCE_H__
#include <linux/dma-fence.h>
+#include <linux/workqueue.h>
#include <nvif/event.h>
struct nouveau_drm;
@@ -45,7 +46,9 @@ struct nouveau_fence_chan {
char name[32];
struct nvif_event event;
- int notify_ref, dead, killed;
+ struct work_struct allow_block_work;
+ atomic_t notify_ref;
+ int dead, killed;
};
struct nouveau_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index dae3baf707a0..4f223c972c6a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1347,7 +1347,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
}
}
- drm_exec_init(exec, vme->flags);
+ drm_exec_init(exec, vme->flags, 0);
drm_exec_until_all_locked(exec) {
ret = bind_lock_validate(job, exec, vme->num_fences);
drm_exec_retry_on_contention(exec);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index a6602c012671..3dda885df5b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -108,6 +108,9 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
} else {
ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
mem->mem.size, &tmp);
+ if (ret)
+ goto done;
+
vma->addr = tmp.addr;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 457ec5db794d..b24eb1e560bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -209,7 +209,7 @@ nvkm_disp_dtor(struct nvkm_engine *engine)
nvkm_head_del(&head);
}
- if (disp->func->dtor)
+ if (disp->func && disp->func->dtor)
disp->func->dtor(disp);
return data;
@@ -243,8 +243,10 @@ nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
spin_lock_init(&disp->client.lock);
ret = nvkm_engine_ctor(&nvkm_disp, device, type, inst, true, &disp->engine);
- if (ret)
+ if (ret) {
+ disp->func = NULL;
return ret;
+ }
if (func->super) {
disp->super.wq = create_singlethread_workqueue("nvkm-disp");
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
index 298035070b3a..6a0a4d3b8902 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
@@ -282,7 +282,7 @@ r535_sor_bl_get(struct nvkm_ior *sor)
{
struct nvkm_disp *disp = sor->disp;
NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
- int lvl;
+ int ret, lvl;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
@@ -292,9 +292,11 @@ r535_sor_bl_get(struct nvkm_ior *sor)
ctrl->displayId = BIT(sor->asy.outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
lvl = ctrl->brightness;
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
@@ -649,9 +651,11 @@ r535_conn_new(struct nvkm_disp *disp, u32 id)
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(id);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return (void *)ctrl;
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ERR_PTR(ret);
+ }
list_for_each_entry(conn, &disp->conns, head) {
if (conn->index == ctrl->data[0].index) {
@@ -686,7 +690,7 @@ r535_outp_acquire(struct nvkm_outp *outp, bool hda)
struct nvkm_disp *disp = outp->disp;
struct nvkm_ior *ior;
NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *ctrl;
- int or;
+ int ret, or;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_DFP_ASSIGN_SOR, sizeof(*ctrl));
@@ -699,9 +703,11 @@ r535_outp_acquire(struct nvkm_outp *outp, bool hda)
if (hda)
ctrl->flags |= NVDEF(NV0073_CTRL, DFP_ASSIGN_SOR_FLAGS, AUDIO, OPTIMAL);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
for (or = 0; or < ARRAY_SIZE(ctrl->sorAssignListWithTag); or++) {
if (ctrl->sorAssignListWithTag[or].displayMask & BIT(outp->index)) {
@@ -727,6 +733,7 @@ static int
r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
{
NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
+ int ret;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl));
@@ -736,9 +743,11 @@ r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
ctrl->subDeviceInstance = 0;
ctrl->head = head;
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
*displayid = ctrl->displayId;
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
@@ -772,9 +781,11 @@ r535_outp_inherit(struct nvkm_outp *outp)
ctrl->subDeviceInstance = 0;
ctrl->displayId = displayid;
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return NULL;
+ }
id = ctrl->index;
proto = ctrl->protocol;
@@ -825,6 +836,7 @@ r535_outp_dfp_get_info(struct nvkm_outp *outp)
{
NV0073_CTRL_DFP_GET_INFO_PARAMS *ctrl;
struct nvkm_disp *disp = outp->disp;
+ int ret;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DFP_GET_INFO, sizeof(*ctrl));
if (IS_ERR(ctrl))
@@ -832,9 +844,11 @@ r535_outp_dfp_get_info(struct nvkm_outp *outp)
ctrl->displayId = BIT(outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
nvkm_debug(&disp->engine.subdev, "DFP %08x: flags:%08x flags2:%08x\n",
ctrl->displayId, ctrl->flags, ctrl->flags2);
@@ -858,9 +872,11 @@ r535_outp_detect(struct nvkm_outp *outp)
ctrl->subDeviceInstance = 0;
ctrl->displayMask = BIT(outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
if (ctrl->displayMask & BIT(outp->index)) {
ret = r535_outp_dfp_get_info(outp);
@@ -895,6 +911,7 @@ r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
{
NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *ctrl;
struct nvkm_disp *disp = outp->disp;
+ int ret;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID,
@@ -904,9 +921,11 @@ r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
*pid = ctrl->displayIdAssigned;
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
@@ -938,38 +957,60 @@ r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8
{
struct nvkm_disp *disp = outp->disp;
NV0073_CTRL_DP_CTRL_PARAMS *ctrl;
- int ret;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ int ret, retries;
+ u32 cmd, data;
- ctrl->subDeviceInstance = 0;
- ctrl->displayId = BIT(outp->index);
- ctrl->cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) |
- NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) |
- NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES);
- ctrl->data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) |
- NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) |
- NVVAL(NV0073_CTRL, DP_DATA, TARGET, target);
+ cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) |
+ NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) |
+ NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES);
+ data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) |
+ NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) |
+ NVVAL(NV0073_CTRL, DP_DATA, TARGET, target);
if (mst)
- ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM);
+ cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM);
if (outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
- ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE);
+ cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE);
if (target == 0 &&
(outp->dp.dpcd[DPCD_RC02] & 0x20) &&
!(outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED))
- ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES);
+ cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ /* We should retry up to 3 times, but only if GSP asks politely */
+ for (retries = 0; retries < 3; ++retries) {
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+ ctrl->retryTimeMs = 0;
+ ctrl->cmd = cmd;
+ ctrl->data = data;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret == -EAGAIN && ctrl->retryTimeMs) {
+ /*
+			 * Device (likely an eDP panel) isn't ready yet; wait for the
+			 * delay specified by GSP before retrying
+ */
+ nvkm_debug(&disp->engine.subdev,
+ "Waiting %dms for GSP LT panel delay before retrying\n",
+ ctrl->retryTimeMs);
+ msleep(ctrl->retryTimeMs);
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ } else {
+ /* GSP didn't say to retry, or we were successful */
+			if (ctrl && ctrl->err)
+ ret = -EIO;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ break;
+ }
+ }
- ret = ctrl->err ? -EIO : 0;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return ret;
}
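The hunk above establishes the retry contract for DP link training: the control push reports -EAGAIN together with a GSP-supplied retryTimeMs when the panel is not ready, and the caller retries up to three times. Below is a minimal userspace sketch of that shape; send_ctrl() is an invented stub standing in for nvkm_gsp_rm_ctrl_push().

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Invented stub: pretend the device needs two attempts before it is
 * ready, reporting -EAGAIN plus a retry delay until then. */
static int send_ctrl(int attempt, unsigned int *retry_ms)
{
	if (attempt < 2) {
		*retry_ms = 10;
		return -EAGAIN;
	}
	*retry_ms = 0;
	return 0;
}

int main(void)
{
	unsigned int retry_ms;
	int ret = 0;

	/* Same shape as the loop above: retry only while the callee says
	 * -EAGAIN *and* supplies a delay, capped at three attempts. */
	for (int retries = 0; retries < 3; retries++) {
		ret = send_ctrl(retries, &retry_ms);
		if (ret == -EAGAIN && retry_ms) {
			usleep(retry_ms * 1000);
			continue;
		}
		break;
	}

	printf("final ret = %d\n", ret);
	return ret ? 1 : 0;
}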
@@ -1036,9 +1077,11 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
memcpy(ctrl->data, data, size);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-		return PTR_ERR(ctrl);
+		return ret;
+ }
memcpy(data, ctrl->data, size);
*psize = ctrl->size;
@@ -1111,10 +1154,13 @@ r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize)
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+ ret = -E2BIG;
if (ctrl->bufferSize <= *psize) {
memcpy(data, ctrl->edidBuffer, ctrl->bufferSize);
*psize = ctrl->bufferSize;
@@ -1153,9 +1199,11 @@ r535_outp_new(struct nvkm_disp *disp, u32 id)
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(id);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
switch (ctrl->type) {
case NV0073_CTRL_SPECIFIC_OR_TYPE_NONE:
@@ -1229,9 +1277,11 @@ r535_outp_new(struct nvkm_disp *disp, u32 id)
ctrl->sorIndex = ~0;
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
@@ -1465,8 +1515,6 @@ r535_disp_oneinit(struct nvkm_disp *disp)
bool nvhg = acpi_check_dsm(handle, &NVHG_DSM_GUID, NVHG_DSM_REV,
1ULL << 0x00000014);
- printk(KERN_ERR "bl: nbci:%d nvhg:%d\n", nbci, nvhg);
-
if (nbci || nvhg) {
union acpi_object argv4 = {
.buffer.type = ACPI_TYPE_BUFFER,
@@ -1479,9 +1527,6 @@ r535_disp_oneinit(struct nvkm_disp *disp)
if (!obj) {
acpi_handle_info(handle, "failed to evaluate _DSM\n");
} else {
- printk(KERN_ERR "bl: obj type %d\n", obj->type);
- printk(KERN_ERR "bl: obj len %d\n", obj->package.count);
-
for (int i = 0; i < obj->package.count; i++) {
union acpi_object *elt = &obj->package.elements[i];
u32 size;
@@ -1491,12 +1536,10 @@ r535_disp_oneinit(struct nvkm_disp *disp)
else
size = 4;
- printk(KERN_ERR "elt %03d: type %d size %d\n", i, elt->type, size);
memcpy(&ctrl->backLightData[ctrl->backLightDataSize], &elt->integer.value, size);
ctrl->backLightDataSize += size;
}
- printk(KERN_ERR "bl: data size %d\n", ctrl->backLightDataSize);
ctrl->status = 0;
ACPI_FREE(obj);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
index e4279f1772a1..377d0e0cef84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
@@ -385,7 +385,7 @@ nvkm_uoutp_mthd_inherit(struct nvkm_outp *outp, void *argv, u32 argc)
/* Ensure an ior is hooked up to this outp already */
ior = outp->func->inherit(outp);
- if (!ior)
+ if (!ior || !ior->arm.head)
return -ENODEV;
/* With iors, there will be a separate output path for each type of connector - and all of
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
index c8ce7ff18713..e74493a4569e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
@@ -550,6 +550,10 @@ ga100_fifo_nonstall_ctor(struct nvkm_fifo *fifo)
struct nvkm_engn *engn = list_first_entry(&runl->engns, typeof(*engn), head);
runl->nonstall.vector = engn->func->nonstall(engn);
+
+ /* if no nonstall vector just keep going */
+ if (runl->nonstall.vector == -1)
+ continue;
if (runl->nonstall.vector < 0) {
RUNL_ERROR(runl, "nonstall %d", runl->nonstall.vector);
return runl->nonstall.vector;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
index d088e636edc3..3454c7d29502 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
@@ -242,6 +242,7 @@ r535_chan_id_put(struct nvkm_chan *chan)
nvkm_memory_unref(&userd->mem);
nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
list_del(&userd->head);
+ kfree(userd);
}
break;
@@ -350,7 +351,7 @@ r535_engn_nonstall(struct nvkm_engn *engn)
int ret;
ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst);
- WARN_ON(ret < 0);
+ WARN_ON(ret == -ENOENT);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
index 04bceaa28a19..da1bebb896f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -25,12 +25,8 @@ int
nvkm_gsp_intr_nonstall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst)
{
for (int i = 0; i < gsp->intr_nr; i++) {
- if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) {
- if (gsp->intr[i].nonstall != ~0)
- return gsp->intr[i].nonstall;
-
- return -EINVAL;
- }
+ if (gsp->intr[i].type == type && gsp->intr[i].inst == inst)
+ return gsp->intr[i].nonstall;
}
return -ENOENT;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index dc44f5c7833f..9ee58e2a0eb2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -70,6 +70,20 @@ struct r535_gsp_msg {
#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
+static int
+r535_rpc_status_to_errno(uint32_t rpc_status)
+{
+ switch (rpc_status) {
+ case 0x55: /* NV_ERR_NOT_READY */
+ case 0x66: /* NV_ERR_TIMEOUT_RETRY */
+ return -EAGAIN;
+ case 0x51: /* NV_ERR_NO_MEMORY */
+ return -ENOMEM;
+ default:
+ return -EINVAL;
+ }
+}
+
static void *
r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
{
@@ -298,7 +312,8 @@ retry:
struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
if (ntfy->fn == msg->function) {
- ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg));
+ if (ntfy->func)
+ ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg));
break;
}
}
@@ -365,10 +380,8 @@ r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
}
ret = r535_gsp_cmdq_push(gsp, rpc);
- if (ret) {
- mutex_unlock(&gsp->cmdq.mutex);
+ if (ret)
return ERR_PTR(ret);
- }
if (wait) {
msg = r535_gsp_msg_recv(gsp, fn, repc);
@@ -585,14 +598,14 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
return rpc;
if (rpc->status) {
- nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
- ret = ERR_PTR(-EINVAL);
+ ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
+ if (PTR_ERR(ret) != -EAGAIN)
+ nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
} else {
ret = repc ? rpc->params : NULL;
}
- if (IS_ERR_OR_NULL(ret))
- nvkm_gsp_rpc_done(gsp, rpc);
+ nvkm_gsp_rpc_done(gsp, rpc);
return ret;
}
@@ -625,29 +638,34 @@ r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
{
rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params);
+ if (!repv)
+ return;
nvkm_gsp_rpc_done(object->client->gsp, rpc);
}
-static void *
-r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+static int
+r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
{
- rpc_gsp_rm_control_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
+ rpc_gsp_rm_control_v03_00 *rpc = container_of((*argv), typeof(*rpc), params);
struct nvkm_gsp *gsp = object->client->gsp;
- void *ret;
+ int ret = 0;
rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
+ if (IS_ERR_OR_NULL(rpc)) {
+ *argv = NULL;
+ return PTR_ERR(rpc);
+ }
if (rpc->status) {
- nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
- object->client->object.handle, object->handle, rpc->cmd, rpc->status);
- ret = ERR_PTR(-EINVAL);
- } else {
- ret = repc ? rpc->params : NULL;
+ ret = r535_rpc_status_to_errno(rpc->status);
+ if (ret != -EAGAIN)
+ nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
+ object->client->object.handle, object->handle, rpc->cmd, rpc->status);
}
- if (IS_ERR_OR_NULL(ret))
+ if (repc)
+ *argv = rpc->params;
+ else
nvkm_gsp_rpc_done(gsp, rpc);
return ret;
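The hunk above changes the calling convention: nvkm_gsp_rm_ctrl_push() now returns an errno and hands the reply back through a double pointer, and callers release the message with nvkm_gsp_rm_ctrl_done() on both success and error paths (done() now tolerates NULL). A self-contained sketch of that ownership pattern; push() and done() are invented stand-ins, only the pattern is real.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ctrl_msg { int err; int payload; };

static int push(struct ctrl_msg **ctrl)
{
	/* On a transport failure the real helper NULLs *ctrl and returns
	 * an errno; on success the reply is passed back through *ctrl. */
	(*ctrl)->payload = 42;
	return 0;
}

static void done(struct ctrl_msg *ctrl)
{
	if (!ctrl)	/* mirrors the new "if (!repv) return;" guard */
		return;
	free(ctrl);
}

int main(void)
{
	struct ctrl_msg *ctrl = calloc(1, sizeof(*ctrl));
	int ret;

	if (!ctrl)
		return 1;

	ret = push(&ctrl);
	if (ret) {
		done(ctrl);		/* error path frees explicitly */
		return 1;
	}

	printf("payload = %d\n", ctrl->payload);
	done(ctrl);			/* so does the success path */
	return 0;
}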
@@ -845,9 +863,11 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
- ctrl = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, ctrl, sizeof(*ctrl));
- if (WARN_ON(IS_ERR(ctrl)))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl));
+ if (WARN_ON(ret)) {
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return ret;
+ }
for (unsigned i = 0; i < ctrl->tableLen; i++) {
enum nvkm_subdev_type type;
@@ -1048,7 +1068,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
char *strings;
int str_offset;
int i;
- size_t rpc_size = sizeof(*rpc) + sizeof(rpc->entries[0]) * NV_GSP_REG_NUM_ENTRIES;
+ size_t rpc_size = struct_size(rpc, entries, NV_GSP_REG_NUM_ENTRIES);
/* add strings + null terminator */
for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++)
@@ -1101,16 +1121,12 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
if (!obj)
return;
- printk(KERN_ERR "nvop: obj type %d\n", obj->type);
- printk(KERN_ERR "nvop: obj len %d\n", obj->buffer.length);
-
if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
WARN_ON(obj->buffer.length != 4))
return;
caps->status = 0;
caps->optimusCaps = *(u32 *)obj->buffer.pointer;
- printk(KERN_ERR "nvop: caps %08x\n", caps->optimusCaps);
ACPI_FREE(obj);
@@ -1137,9 +1153,6 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
if (!obj)
return;
- printk(KERN_ERR "jt: obj type %d\n", obj->type);
- printk(KERN_ERR "jt: obj len %d\n", obj->buffer.length);
-
if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
WARN_ON(obj->buffer.length != 4))
return;
@@ -1148,7 +1161,6 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
jt->jtCaps = *(u32 *)obj->buffer.pointer;
jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20;
jt->bSBIOSCaps = 0;
- printk(KERN_ERR "jt: caps %08x rev:%04x\n", jt->jtCaps, jt->jtRevId);
ACPI_FREE(obj);
@@ -1159,6 +1171,8 @@ static void
r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
MUX_METHOD_DATA_ELEMENT *part)
{
+ union acpi_object mux_arg = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list input = { 1, &mux_arg };
acpi_handle iter = NULL, handle_mux = NULL;
acpi_status status;
unsigned long long value;
@@ -1181,14 +1195,18 @@ r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
if (!handle_mux)
return;
- status = acpi_evaluate_integer(handle_mux, "MXDM", NULL, &value);
+ /* I -think- 0 means "acquire" according to nvidia's driver source */
+ input.pointer->integer.type = ACPI_TYPE_INTEGER;
+ input.pointer->integer.value = 0;
+
+ status = acpi_evaluate_integer(handle_mux, "MXDM", &input, &value);
if (ACPI_SUCCESS(status)) {
mode->acpiId = id;
mode->mode = value;
mode->status = 0;
}
- status = acpi_evaluate_integer(handle_mux, "MXDS", NULL, &value);
+ status = acpi_evaluate_integer(handle_mux, "MXDS", &input, &value);
if (ACPI_SUCCESS(status)) {
part->acpiId = id;
part->mode = value;
@@ -1234,8 +1252,8 @@ r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
dod->acpiIdListLen += sizeof(dod->acpiIdList[0]);
}
- printk(KERN_ERR "_DOD: ok! len:%d\n", dod->acpiIdListLen);
dod->status = 0;
+ kfree(output.pointer);
}
#endif
@@ -1379,6 +1397,13 @@ r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
return 0;
}
+/**
+ * r535_gsp_msg_run_cpu_sequencer() - process I/O commands from the GSP
+ *
+ * The GSP sequencer is a list of I/O commands that the GSP sends to the
+ * driver to execute on its behalf. The most common usage is to perform a
+ * special mid-initialization reset.
+ */
static int
r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
{
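As the new kernel-doc says, the sequencer is an opcode stream the driver walks and executes on the GSP's behalf. A hedged illustration of that fetch-and-execute shape follows; the opcodes and register offsets are invented and do not match the real GSP sequencer encoding.

#include <stdint.h>
#include <stdio.h>

enum seq_op { SEQ_REG_WRITE, SEQ_DELAY_US, SEQ_END };

struct seq_cmd {
	enum seq_op op;
	uint32_t arg0, arg1;
};

static void run_sequencer(const struct seq_cmd *cmd)
{
	/* Walk the command list and perform each I/O for the GSP. */
	for (; cmd->op != SEQ_END; cmd++) {
		switch (cmd->op) {
		case SEQ_REG_WRITE:
			printf("wr32 %#x <- %#x\n",
			       (unsigned int)cmd->arg0, (unsigned int)cmd->arg1);
			break;
		case SEQ_DELAY_US:
			printf("udelay %u\n", (unsigned int)cmd->arg0);
			break;
		default:
			break;
		}
	}
}

int main(void)
{
	/* A made-up "mid-initialization reset": assert, wait, release. */
	static const struct seq_cmd reset_seq[] = {
		{ SEQ_REG_WRITE, 0x110000, 0x1 },
		{ SEQ_DELAY_US,  10,       0   },
		{ SEQ_REG_WRITE, 0x110000, 0x0 },
		{ SEQ_END,       0,        0   },
	};

	run_sequencer(reset_seq);
	return 0;
}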
@@ -1718,6 +1743,23 @@ r535_gsp_libos_id8(const char *name)
return id;
}
+/**
+ * create_pte_array() - creates a PTE array of a physically contiguous buffer
+ * @ptes: pointer to the array
+ * @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned)
+ * @size: size of the buffer
+ *
+ * GSP-RM sometimes expects physically-contiguous buffers to have an array of
+ * "PTEs" for each page in that buffer. Although in theory that allows for
+ * the buffer to be physically discontiguous, GSP-RM does not currently
+ * support that.
+ *
+ * In this case, the PTEs are DMA addresses of each page of the buffer. Since
+ * the buffer is physically contiguous, calculating all the PTEs is simple
+ * math.
+ *
+ * See memdescGetPhysAddrsForGpu()
+ */
static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
{
unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);
@@ -1727,6 +1769,35 @@ static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT);
}
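Since the buffer is physically contiguous, each PTE is just the base bus address plus a page-sized stride. A standalone userspace restatement of the math, using a made-up bus address:

#include <stdint.h>
#include <stdio.h>

#define GSP_PAGE_SHIFT	12
#define GSP_PAGE_SIZE	(1u << GSP_PAGE_SHIFT)

static void create_pte_array(uint64_t *ptes, uint64_t addr, size_t size)
{
	size_t num_pages = (size + GSP_PAGE_SIZE - 1) / GSP_PAGE_SIZE;

	for (size_t i = 0; i < num_pages; i++)
		ptes[i] = addr + ((uint64_t)i << GSP_PAGE_SHIFT);
}

int main(void)
{
	uint64_t ptes[4];

	/* A 16 KiB buffer at a made-up bus address covers four 4K pages. */
	create_pte_array(ptes, 0x100000, 4 * GSP_PAGE_SIZE);
	for (int i = 0; i < 4; i++)
		printf("pte[%d] = 0x%llx\n", i, (unsigned long long)ptes[i]);
	/* prints 0x100000, 0x101000, 0x102000, 0x103000 */
	return 0;
}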
+/**
+ * r535_gsp_libos_init() - create the libos arguments structure
+ *
+ * The logging buffers are byte queues that contain encoded printf-like
+ * messages from GSP-RM. They need to be decoded by a special application
+ * that can parse the buffers.
+ *
+ * The 'loginit' buffer contains logs from early GSP-RM init and
+ * exception dumps. The 'logrm' buffer contains the subsequent logs. Both are
+ * written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE.
+ *
+ * The physical address map for the log buffer is stored in the buffer
+ * itself, starting with offset 1. Offset 0 contains the "put" pointer.
+ *
+ * The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
+ * configured for a larger page size (e.g. 64K pages), we need to give
+ * the GSP an array of 4K pages. Fortunately, since the buffer is
+ * physically contiguous, it's simple math to calculate the addresses.
+ *
+ * The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently
+ * ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
+ * buffers to be physically contiguous anyway.
+ *
+ * The memory allocated for the arguments must remain until the GSP sends the
+ * init_done RPC.
+ *
+ * See _kgspInitLibosLoggingStructures (allocates memory for buffers)
+ * See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array)
+ */
static int
r535_gsp_libos_init(struct nvkm_gsp *gsp)
{
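A sketch of the log-buffer layout the comment describes, with invented names: 64-bit word 0 is the "put" pointer, the following words hold the DMA addresses of the buffer's own 4K pages.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096u

struct log_hdr {
	uint64_t put;		/* offset 0: write pointer, owned by GSP-RM */
	uint64_t ptes[];	/* offset 1..: one DMA address per 4K page */
};

int main(void)
{
	size_t size = 4 * PAGE_SZ;	/* must be a multiple of GSP_PAGE_SIZE */
	uint64_t dma = 0x2000000;	/* made-up bus address */
	size_t pages = size / PAGE_SZ;
	struct log_hdr *hdr = calloc(1, sizeof(*hdr) + pages * sizeof(uint64_t));

	if (!hdr)
		return 1;

	for (size_t i = 0; i < pages; i++)
		hdr->ptes[i] = dma + i * PAGE_SZ;

	printf("put = %llu, pte[1] = 0x%llx\n",
	       (unsigned long long)hdr->put, (unsigned long long)hdr->ptes[1]);
	free(hdr);
	return 0;
}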
@@ -1837,6 +1908,35 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
}
+/**
+ * nvkm_gsp_radix3_sg() - build a radix3 table from an S/G list
+ *
+ * The GSP uses a three-level page table, called radix3, to map the firmware.
+ * Each 64-bit "pointer" in the table is either the bus address of an entry in
+ * the next table (for levels 0 and 1) or the bus address of the next page in
+ * the GSP firmware image itself.
+ *
+ * Level 0 contains a single entry in one page that points to the first page
+ * of level 1.
+ *
+ * Level 1, since it's also only one page in size, contains up to 512 entries,
+ * one for each page in Level 2.
+ *
+ * Level 2 can be up to 512 pages in size, and each of those entries points to
+ * the next page of the firmware image. Since there can be up to 512*512
+ * pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB.
+ *
+ * Internally, the GSP has its window into system memory, but the base
+ * physical address of the aperture is not 0. In fact, it varies depending on
+ * the GPU architecture. Since the GPU is a PCI device, this window is
+ * accessed via DMA and is therefore bound by IOMMU translation. The end
+ * result is that GSP-RM must translate the bus addresses in the table to GSP
+ * physical addresses. All this should happen transparently.
+ *
+ * Returns 0 on success, or negative error code
+ *
+ * See kgspCreateRadix3_IMPL
+ */
static int
nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size,
struct nvkm_gsp_radix3 *rx3)
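The 1GB limit quoted in the comment follows directly from the geometry: one 4K page holds 512 64-bit entries, and two fan-out levels of 512 entries each address 512 * 512 firmware pages. A quick arithmetic check:

#include <stdint.h>
#include <stdio.h>

#define GSP_PAGE_SIZE	4096ull
#define PTES_PER_PAGE	(GSP_PAGE_SIZE / sizeof(uint64_t))	/* 512 */

int main(void)
{
	/* level 0: one entry -> the level 1 page
	 * level 1: up to 512 entries -> level 2 pages
	 * level 2: up to 512 * 512 entries -> firmware pages */
	uint64_t max_fw = PTES_PER_PAGE * PTES_PER_PAGE * GSP_PAGE_SIZE;

	printf("max firmware size = %llu bytes (1 GiB)\n",
	       (unsigned long long)max_fw);
	return 0;
}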
@@ -2106,7 +2206,9 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
r535_gsp_msg_mmu_fault_queued, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
-
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
ret = r535_gsp_rm_boot_ctor(gsp);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 1b811d6972a1..201022ae9214 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -49,14 +49,14 @@
#include <subdev/mmu.h>
struct gk20a_instobj {
- struct nvkm_memory memory;
+ struct nvkm_instobj base;
struct nvkm_mm_node *mn;
struct gk20a_instmem *imem;
/* CPU mapping */
u32 *vaddr;
};
-#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
+#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, base.memory)
/*
* Used for objects allocated using the DMA API
@@ -148,7 +148,7 @@ gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
list_del(&obj->vaddr_node);
vunmap(obj->base.vaddr);
obj->base.vaddr = NULL;
- imem->vaddr_use -= nvkm_memory_size(&obj->base.memory);
+ imem->vaddr_use -= nvkm_memory_size(&obj->base.base.memory);
nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
imem->vaddr_max);
}
@@ -283,7 +283,7 @@ gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
{
struct gk20a_instobj *node = gk20a_instobj(memory);
struct nvkm_vmm_map map = {
- .memory = &node->memory,
+ .memory = &node->base.memory,
.offset = offset,
.mem = node->mn,
};
@@ -391,8 +391,8 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
return -ENOMEM;
*_node = &node->base;
- nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
- node->base.memory.ptrs = &gk20a_instobj_ptrs;
+ nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.base.memory);
+ node->base.base.memory.ptrs = &gk20a_instobj_ptrs;
node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
&node->handle, GFP_KERNEL,
@@ -438,8 +438,8 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
*_node = &node->base;
node->dma_addrs = (void *)(node->pages + npages);
- nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
- node->base.memory.ptrs = &gk20a_instobj_ptrs;
+ nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.base.memory);
+ node->base.base.memory.ptrs = &gk20a_instobj_ptrs;
/* Allocate backing memory */
for (i = 0; i < npages; i++) {
@@ -533,7 +533,7 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
else
ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
align, &node);
- *pmemory = node ? &node->memory : NULL;
+ *pmemory = node ? &node->base.memory : NULL;
if (ret)
return ret;
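The gk20a changes above nest the old nvkm_memory member one level deeper, inside an nvkm_instobj, which is why every accessor now walks through base.memory (or base.base.memory for the DMA/IOMMU variants). A minimal demonstration of the container_of arithmetic involved, using simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

struct nvkm_memory  { int placeholder; };
struct nvkm_instobj { struct nvkm_memory memory; };
struct gk20a_instobj { struct nvkm_instobj base; void *vaddr; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* After the change the memory object sits one level deeper, so the
 * accessor walks back through base.memory instead of memory. */
#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, base.memory)

int main(void)
{
	struct gk20a_instobj obj;
	struct nvkm_memory *mem = &obj.base.memory;

	printf("recovered %p, original %p\n",
	       (void *)gk20a_instobj(mem), (void *)&obj);
	return 0;
}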
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
index e7e8fdf3adab..29682722b0b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
@@ -28,19 +28,14 @@ static void
gp10b_ltc_init(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
- struct iommu_fwspec *spec;
+ u32 sid;
nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
nvkm_wr32(device, 0x100800, ltc->ltc_nr);
- spec = dev_iommu_fwspec_get(device->dev);
- if (spec) {
- u32 sid = spec->ids[0] & 0xffff;
-
- /* stream ID */
+ if (tegra_dev_iommu_get_stream_id(device->dev, &sid))
nvkm_wr32(device, 0x160000, sid << 2);
- }
}
static const struct nvkm_ltc_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index e34bc6076401..8379e72d77ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -31,7 +31,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
type |= 0x00000001; /* PAGE_ALL */
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
- type |= 0x00000004; /* HUB_ONLY */
+ type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */
mutex_lock(&vmm->mmu->mutex);
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 99e14dc212ec..dad938cf6dec 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -194,6 +194,15 @@ config DRM_PANEL_ILITEK_ILI9341
	  QVGA (240x320) RGB panels. Supports serial & parallel RGB
	  interfaces.
+config DRM_PANEL_ILITEK_ILI9805
+ tristate "Ilitek ILI9805-based panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for panels based on the
+ Ilitek ILI9805 controller.
+
config DRM_PANEL_ILITEK_ILI9881C
tristate "Ilitek ILI9881C-based panels"
depends on OF
@@ -735,6 +744,15 @@ config DRM_PANEL_SITRONIX_ST7789V
Say Y here if you want to enable support for the Sitronix
ST7789V controller for 240x320 LCD panels
+config DRM_PANEL_SYNAPTICS_R63353
+ tristate "Synaptics R63353-based panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for panels based on the
+ Synaptics R63353 controller.
+
config DRM_PANEL_SONY_ACX565AKM
tristate "Sony ACX565AKM panel"
depends on GPIOLIB && OF && SPI
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index d10c3de51c6d..d94a644d0a6c 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o
+obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9805) += panel-ilitek-ili9805.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9882T) += panel-ilitek-ili9882t.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_EJ030NA) += panel-innolux-ej030na.o
@@ -74,6 +75,7 @@ obj-$(CONFIG_DRM_PANEL_SHARP_LS060T1SX01) += panel-sharp-ls060t1sx01.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_SYNAPTICS_R63353) += panel-synaptics-r63353.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
obj-$(CONFIG_DRM_PANEL_SONY_TD4353_JDI) += panel-sony-td4353-jdi.o
obj-$(CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521) += panel-sony-tulip-truly-nt35521.o
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index be8f48e3c1db..c4c0f08e9202 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1764,6 +1764,7 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = starry_qfh032011_53g_init_cmd,
+ .lp11_before_reset = true,
};
static const struct drm_display_mode starry_himax83102_j02_default_mode = {
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index c76f186c4baa..a0b6f69b916f 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1980,9 +1980,10 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('I', 'V', 'O', 0x8c4d, &delay_200_150_e200, "R140NWFM R1"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
- EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
+
EDP_PANEL_ENTRY('S', 'D', 'C', 0x416d, &delay_100_500_e200, "ATNA45AF01"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"),
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9805.c b/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
new file mode 100644
index 000000000000..1cbc25758bd2
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 BSH Hausgerate GmbH
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#define ILI9805_EXTCMD_CMD_SET_ENABLE_REG (0xff)
+#define ILI9805_SETEXTC_PARAMETER1 (0xff)
+#define ILI9805_SETEXTC_PARAMETER2 (0x98)
+#define ILI9805_SETEXTC_PARAMETER3 (0x05)
+
+#define ILI9805_INSTR(_delay, ...) { \
+ .delay = (_delay), \
+ .len = sizeof((u8[]) {__VA_ARGS__}), \
+ .data = (u8[]){__VA_ARGS__} \
+ }
+
+struct ili9805_instr {
+ size_t len;
+ const u8 *data;
+ u32 delay;
+};
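ILI9805_INSTR packs a variadic byte list into an ili9805_instr via compound literals, deriving the payload length from the argument list itself so the command tables below never state a length by hand. A self-contained illustration of the expansion:

#include <stdio.h>

typedef unsigned char u8;

struct instr {
	size_t len;
	const u8 *data;
	unsigned int delay;
};

/* Same trick as ILI9805_INSTR: the compound literal supplies both the
 * payload bytes and, via sizeof, their count. */
#define INSTR(_delay, ...) {			\
	.delay = (_delay),			\
	.len = sizeof((u8[]) {__VA_ARGS__}),	\
	.data = (u8[]) {__VA_ARGS__},		\
}

static const struct instr example = INSTR(10, 0xB4, 0x02);

int main(void)
{
	printf("len = %zu, delay = %u, first byte = 0x%02x\n",
	       example.len, example.delay, example.data[0]);
	return 0;
}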
+
+struct ili9805_desc {
+ const char *name;
+ const struct ili9805_instr *init;
+ const size_t init_length;
+ const struct drm_display_mode *mode;
+ u32 width_mm;
+ u32 height_mm;
+};
+
+struct ili9805 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ const struct ili9805_desc *desc;
+
+ struct regulator *dvdd;
+ struct regulator *avdd;
+ struct gpio_desc *reset_gpio;
+};
+
+static const struct ili9805_instr gpm1780a0_init[] = {
+ ILI9805_INSTR(100, ILI9805_EXTCMD_CMD_SET_ENABLE_REG, ILI9805_SETEXTC_PARAMETER1,
+ ILI9805_SETEXTC_PARAMETER2, ILI9805_SETEXTC_PARAMETER3),
+ ILI9805_INSTR(100, 0xFD, 0x0F, 0x10, 0x44, 0x00),
+ ILI9805_INSTR(0, 0xf8, 0x18, 0x02, 0x02, 0x18, 0x02, 0x02, 0x30, 0x00,
+ 0x00, 0x30, 0x00, 0x00, 0x30, 0x00, 0x00),
+ ILI9805_INSTR(0, 0xB8, 0x62),
+ ILI9805_INSTR(0, 0xF1, 0x00),
+ ILI9805_INSTR(0, 0xF2, 0x00, 0x58, 0x40),
+ ILI9805_INSTR(0, 0xF3, 0x60, 0x83, 0x04),
+ ILI9805_INSTR(0, 0xFC, 0x04, 0x0F, 0x01),
+ ILI9805_INSTR(0, 0xEB, 0x08, 0x0F),
+ ILI9805_INSTR(0, 0xe0, 0x00, 0x08, 0x0d, 0x0e, 0x0e, 0x0d, 0x0a, 0x08, 0x04,
+ 0x08, 0x0d, 0x0f, 0x0b, 0x1c, 0x14, 0x0a),
+ ILI9805_INSTR(0, 0xe1, 0x00, 0x08, 0x0d, 0x0e, 0x0e, 0x0d, 0x0a, 0x08, 0x04,
+ 0x08, 0x0d, 0x0f, 0x0b, 0x1c, 0x14, 0x0a),
+ ILI9805_INSTR(10, 0xc1, 0x13, 0x39, 0x19, 0x06),
+ ILI9805_INSTR(10, 0xc7, 0xe5),
+ ILI9805_INSTR(10, 0xB1, 0x00, 0x12, 0x14),
+ ILI9805_INSTR(10, 0xB4, 0x02),
+ ILI9805_INSTR(0, 0xBB, 0x14, 0x55),
+ ILI9805_INSTR(0, MIPI_DCS_SET_ADDRESS_MODE, 0x08),
+ ILI9805_INSTR(0, MIPI_DCS_SET_PIXEL_FORMAT, 0x77),
+ ILI9805_INSTR(0, 0x20),
+ ILI9805_INSTR(0, 0xB0, 0x01),
+ ILI9805_INSTR(0, 0xB6, 0x31, 0x00, 0xef),
+ ILI9805_INSTR(0, 0xDF, 0x23),
+ ILI9805_INSTR(0, 0xB9, 0x02, 0x00),
+};
+
+static const struct ili9805_instr tm041xdhg01_init[] = {
+ ILI9805_INSTR(100, ILI9805_EXTCMD_CMD_SET_ENABLE_REG, ILI9805_SETEXTC_PARAMETER1,
+ ILI9805_SETEXTC_PARAMETER2, ILI9805_SETEXTC_PARAMETER3),
+ ILI9805_INSTR(100, 0xFD, 0x0F, 0x13, 0x44, 0x00),
+ ILI9805_INSTR(0, 0xf8, 0x18, 0x02, 0x02, 0x18, 0x02, 0x02, 0x30, 0x01,
+ 0x01, 0x30, 0x01, 0x01, 0x30, 0x01, 0x01),
+ ILI9805_INSTR(0, 0xB8, 0x74),
+ ILI9805_INSTR(0, 0xF1, 0x00),
+ ILI9805_INSTR(0, 0xF2, 0x00, 0x58, 0x40),
+ ILI9805_INSTR(0, 0xFC, 0x04, 0x0F, 0x01),
+ ILI9805_INSTR(0, 0xEB, 0x08, 0x0F),
+ ILI9805_INSTR(0, 0xe0, 0x01, 0x0d, 0x15, 0x0e, 0x0f, 0x0f, 0x0b, 0x08, 0x04,
+ 0x07, 0x0a, 0x0d, 0x0c, 0x15, 0x0f, 0x08),
+ ILI9805_INSTR(0, 0xe1, 0x01, 0x0d, 0x15, 0x0e, 0x0f, 0x0f, 0x0b, 0x08, 0x04,
+ 0x07, 0x0a, 0x0d, 0x0c, 0x15, 0x0f, 0x08),
+ ILI9805_INSTR(10, 0xc1, 0x15, 0x03, 0x03, 0x31),
+ ILI9805_INSTR(10, 0xB1, 0x00, 0x12, 0x14),
+ ILI9805_INSTR(10, 0xB4, 0x02),
+ ILI9805_INSTR(0, 0xBB, 0x14, 0x55),
+ ILI9805_INSTR(0, MIPI_DCS_SET_ADDRESS_MODE, 0x0a),
+ ILI9805_INSTR(0, MIPI_DCS_SET_PIXEL_FORMAT, 0x77),
+ ILI9805_INSTR(0, 0x20),
+ ILI9805_INSTR(0, 0xB0, 0x00),
+ ILI9805_INSTR(0, 0xB6, 0x01),
+ ILI9805_INSTR(0, 0xc2, 0x11),
+ ILI9805_INSTR(0, 0x51, 0xFF),
+ ILI9805_INSTR(0, 0x53, 0x24),
+ ILI9805_INSTR(0, 0x55, 0x00),
+};
+
+static inline struct ili9805 *panel_to_ili9805(struct drm_panel *panel)
+{
+ return container_of(panel, struct ili9805, panel);
+}
+
+static int ili9805_power_on(struct ili9805 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = regulator_enable(ctx->avdd);
+ if (ret) {
+ dev_err(dev, "Failed to enable avdd regulator (%d)\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(ctx->dvdd);
+ if (ret) {
+ dev_err(dev, "Failed to enable dvdd regulator (%d)\n", ret);
+ regulator_disable(ctx->avdd);
+ return ret;
+ }
+
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(5000, 10000);
+ gpiod_set_value(ctx->reset_gpio, 1);
+ msleep(120);
+
+ return 0;
+}
+
+static int ili9805_power_off(struct ili9805 *ctx)
+{
+ gpiod_set_value(ctx->reset_gpio, 0);
+ regulator_disable(ctx->dvdd);
+ regulator_disable(ctx->avdd);
+
+ return 0;
+}
+
+static int ili9805_activate(struct ili9805 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int i, ret;
+
+ for (i = 0; i < ctx->desc->init_length; i++) {
+ const struct ili9805_instr *instr = &ctx->desc->init[i];
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, instr->data, instr->len);
+ if (ret < 0)
+ return ret;
+
+ if (instr->delay > 0)
+ msleep(instr->delay);
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
+ if (ret) {
+ dev_err(dev, "Failed to exit sleep mode (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(5000, 6000);
+
+ ret = mipi_dsi_dcs_set_display_on(ctx->dsi);
+ if (ret) {
+ dev_err(dev, "Failed to set display ON (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ili9805_prepare(struct drm_panel *panel)
+{
+ struct ili9805 *ctx = panel_to_ili9805(panel);
+ int ret;
+
+ ret = ili9805_power_on(ctx);
+ if (ret)
+ return ret;
+
+ ret = ili9805_activate(ctx);
+ if (ret) {
+ ili9805_power_off(ctx);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ili9805_deactivate(struct ili9805 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(ctx->dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display OFF (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(5000, 10000);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ili9805_unprepare(struct drm_panel *panel)
+{
+ struct ili9805 *ctx = panel_to_ili9805(panel);
+
+ ili9805_deactivate(ctx);
+ ili9805_power_off(ctx);
+
+ return 0;
+}
+
+static const struct drm_display_mode gpm1780a0_timing = {
+ .clock = 26227,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 10,
+ .hsync_end = 480 + 10 + 2,
+ .htotal = 480 + 10 + 2 + 36,
+
+ .vdisplay = 480,
+ .vsync_start = 480 + 2,
+	.vsync_end = 480 + 2 + 4,
+ .vtotal = 480 + 2 + 4 + 10,
+};
+
+static const struct drm_display_mode tm041xdhg01_timing = {
+ .clock = 26227,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 10,
+ .hsync_end = 480 + 10 + 2,
+ .htotal = 480 + 10 + 2 + 36,
+
+ .vdisplay = 768,
+ .vsync_start = 768 + 2,
+	.vsync_end = 768 + 2 + 4,
+ .vtotal = 768 + 2 + 4 + 10,
+};
+
+static int ili9805_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct ili9805 *ctx = panel_to_ili9805(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
+ if (!mode) {
+		dev_err(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n",
+ ctx->desc->mode->hdisplay,
+ ctx->desc->mode->vdisplay,
+ drm_mode_vrefresh(ctx->desc->mode));
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs ili9805_funcs = {
+ .prepare = ili9805_prepare,
+ .unprepare = ili9805_unprepare,
+ .get_modes = ili9805_get_modes,
+};
+
+static int ili9805_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct ili9805 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+ ctx->desc = of_device_get_match_data(&dsi->dev);
+
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_NO_EOT_PACKET;
+ dsi->lanes = 2;
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &ili9805_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
+ if (IS_ERR(ctx->dvdd))
+ return PTR_ERR(ctx->dvdd);
+ ctx->avdd = devm_regulator_get(&dsi->dev, "avdd");
+ if (IS_ERR(ctx->avdd))
+ return PTR_ERR(ctx->avdd);
+
+ ctx->reset_gpio = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->panel.prepare_prev_first = true;
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ili9805_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct ili9805 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct ili9805_desc gpm1780a0_desc = {
+ .init = gpm1780a0_init,
+ .init_length = ARRAY_SIZE(gpm1780a0_init),
+ .mode = &gpm1780a0_timing,
+ .width_mm = 65,
+ .height_mm = 65,
+};
+
+static const struct ili9805_desc tm041xdhg01_desc = {
+ .init = tm041xdhg01_init,
+ .init_length = ARRAY_SIZE(tm041xdhg01_init),
+ .mode = &tm041xdhg01_timing,
+ .width_mm = 42,
+ .height_mm = 96,
+};
+
+static const struct of_device_id ili9805_of_match[] = {
+ { .compatible = "giantplus,gpm1790a0", .data = &gpm1780a0_desc },
+ { .compatible = "tianma,tm041xdhg01", .data = &tm041xdhg01_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ili9805_of_match);
+
+static struct mipi_dsi_driver ili9805_dsi_driver = {
+ .probe = ili9805_dsi_probe,
+ .remove = ili9805_dsi_remove,
+ .driver = {
+ .name = "ili9805-dsi",
+ .of_match_table = ili9805_of_match,
+ },
+};
+module_mipi_dsi_driver(ili9805_dsi_driver);
+
+MODULE_AUTHOR("Matthias Proske <Matthias.Proske@bshg.com>");
+MODULE_AUTHOR("Michael Trimarchi <michael@amarulasolutions.com>");
+MODULE_DESCRIPTION("Ilitek ILI9805 Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 6e3670508e3a..30919c872ac8 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -326,7 +326,7 @@ static const struct drm_display_mode ltk050h3148w_mode = {
static const struct ltk050h3146w_desc ltk050h3148w_data = {
.mode = &ltk050h3148w_mode,
.init = ltk050h3148w_init_sequence,
- .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_BURST,
};
static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
index 71e57de6d8b2..1aab0c9ae52f 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
@@ -20,11 +20,18 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+struct nv3052c_reg {
+ u8 cmd;
+ u8 val;
+};
+
struct nv3052c_panel_info {
const struct drm_display_mode *display_modes;
unsigned int num_modes;
u16 width_mm, height_mm;
u32 bus_format, bus_flags;
+ const struct nv3052c_reg *panel_regs;
+ unsigned int panel_regs_len;
};
struct nv3052c {
@@ -36,15 +43,10 @@ struct nv3052c {
struct gpio_desc *reset_gpio;
};
-struct nv3052c_reg {
- u8 cmd;
- u8 val;
-};
-
-static const struct nv3052c_reg nv3052c_panel_regs[] = {
- { 0xff, 0x30 },
- { 0xff, 0x52 },
- { 0xff, 0x01 },
+static const struct nv3052c_reg ltk035c5444t_panel_regs[] = {
+ // EXTC Command set enable, select page 1
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x01 },
+ // Mostly unknown registers
{ 0xe3, 0x00 },
{ 0x40, 0x00 },
{ 0x03, 0x40 },
@@ -62,15 +64,15 @@ static const struct nv3052c_reg nv3052c_panel_regs[] = {
{ 0x25, 0x06 },
{ 0x26, 0x14 },
{ 0x27, 0x14 },
- { 0x38, 0xcc },
- { 0x39, 0xd7 },
- { 0x3a, 0x4a },
+ { 0x38, 0xcc }, // VCOM_ADJ1
+ { 0x39, 0xd7 }, // VCOM_ADJ2
+ { 0x3a, 0x4a }, // VCOM_ADJ3
{ 0x28, 0x40 },
{ 0x29, 0x01 },
{ 0x2a, 0xdf },
{ 0x49, 0x3c },
- { 0x91, 0x77 },
- { 0x92, 0x77 },
+ { 0x91, 0x77 }, // EXTPW_CTRL2
+ { 0x92, 0x77 }, // EXTPW_CTRL3
{ 0xa0, 0x55 },
{ 0xa1, 0x50 },
{ 0xa4, 0x9c },
@@ -94,123 +96,321 @@ static const struct nv3052c_reg nv3052c_panel_regs[] = {
{ 0xb8, 0x26 },
{ 0xf0, 0x00 },
{ 0xf6, 0xc0 },
- { 0xff, 0x30 },
- { 0xff, 0x52 },
- { 0xff, 0x02 },
- { 0xb0, 0x0b },
- { 0xb1, 0x16 },
- { 0xb2, 0x17 },
- { 0xb3, 0x2c },
- { 0xb4, 0x32 },
- { 0xb5, 0x3b },
- { 0xb6, 0x29 },
- { 0xb7, 0x40 },
- { 0xb8, 0x0d },
- { 0xb9, 0x05 },
- { 0xba, 0x12 },
- { 0xbb, 0x10 },
- { 0xbc, 0x12 },
- { 0xbd, 0x15 },
- { 0xbe, 0x19 },
- { 0xbf, 0x0e },
- { 0xc0, 0x16 },
- { 0xc1, 0x0a },
- { 0xd0, 0x0c },
- { 0xd1, 0x17 },
- { 0xd2, 0x14 },
- { 0xd3, 0x2e },
- { 0xd4, 0x32 },
- { 0xd5, 0x3c },
- { 0xd6, 0x22 },
- { 0xd7, 0x3d },
- { 0xd8, 0x0d },
- { 0xd9, 0x07 },
- { 0xda, 0x13 },
- { 0xdb, 0x13 },
- { 0xdc, 0x11 },
- { 0xdd, 0x15 },
- { 0xde, 0x19 },
- { 0xdf, 0x10 },
- { 0xe0, 0x17 },
- { 0xe1, 0x0a },
- { 0xff, 0x30 },
- { 0xff, 0x52 },
- { 0xff, 0x03 },
- { 0x00, 0x2a },
- { 0x01, 0x2a },
- { 0x02, 0x2a },
- { 0x03, 0x2a },
- { 0x04, 0x61 },
- { 0x05, 0x80 },
- { 0x06, 0xc7 },
- { 0x07, 0x01 },
- { 0x08, 0x03 },
- { 0x09, 0x04 },
- { 0x70, 0x22 },
- { 0x71, 0x80 },
- { 0x30, 0x2a },
- { 0x31, 0x2a },
- { 0x32, 0x2a },
- { 0x33, 0x2a },
- { 0x34, 0x61 },
- { 0x35, 0xc5 },
- { 0x36, 0x80 },
- { 0x37, 0x23 },
- { 0x40, 0x03 },
- { 0x41, 0x04 },
- { 0x42, 0x05 },
- { 0x43, 0x06 },
- { 0x44, 0x11 },
- { 0x45, 0xe8 },
- { 0x46, 0xe9 },
- { 0x47, 0x11 },
- { 0x48, 0xea },
- { 0x49, 0xeb },
- { 0x50, 0x07 },
- { 0x51, 0x08 },
- { 0x52, 0x09 },
- { 0x53, 0x0a },
- { 0x54, 0x11 },
- { 0x55, 0xec },
- { 0x56, 0xed },
- { 0x57, 0x11 },
- { 0x58, 0xef },
- { 0x59, 0xf0 },
- { 0xb1, 0x01 },
- { 0xb4, 0x15 },
- { 0xb5, 0x16 },
- { 0xb6, 0x09 },
- { 0xb7, 0x0f },
- { 0xb8, 0x0d },
- { 0xb9, 0x0b },
- { 0xba, 0x00 },
- { 0xc7, 0x02 },
- { 0xca, 0x17 },
- { 0xcb, 0x18 },
- { 0xcc, 0x0a },
- { 0xcd, 0x10 },
- { 0xce, 0x0e },
- { 0xcf, 0x0c },
- { 0xd0, 0x00 },
- { 0x81, 0x00 },
- { 0x84, 0x15 },
- { 0x85, 0x16 },
- { 0x86, 0x10 },
- { 0x87, 0x0a },
- { 0x88, 0x0c },
- { 0x89, 0x0e },
- { 0x8a, 0x02 },
- { 0x97, 0x00 },
- { 0x9a, 0x17 },
- { 0x9b, 0x18 },
- { 0x9c, 0x0f },
- { 0x9d, 0x09 },
- { 0x9e, 0x0b },
- { 0x9f, 0x0d },
- { 0xa0, 0x01 },
- { 0xff, 0x30 },
- { 0xff, 0x52 },
- { 0xff, 0x02 },
+ // EXTC Command set enable, select page 2
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 },
+ // Set gray scale voltage to adjust gamma
+ { 0xb0, 0x0b }, // PGAMVR0
+ { 0xb1, 0x16 }, // PGAMVR1
+ { 0xb2, 0x17 }, // PGAMVR2
+ { 0xb3, 0x2c }, // PGAMVR3
+ { 0xb4, 0x32 }, // PGAMVR4
+ { 0xb5, 0x3b }, // PGAMVR5
+ { 0xb6, 0x29 }, // PGAMPR0
+ { 0xb7, 0x40 }, // PGAMPR1
+ { 0xb8, 0x0d }, // PGAMPK0
+ { 0xb9, 0x05 }, // PGAMPK1
+ { 0xba, 0x12 }, // PGAMPK2
+ { 0xbb, 0x10 }, // PGAMPK3
+ { 0xbc, 0x12 }, // PGAMPK4
+ { 0xbd, 0x15 }, // PGAMPK5
+ { 0xbe, 0x19 }, // PGAMPK6
+ { 0xbf, 0x0e }, // PGAMPK7
+ { 0xc0, 0x16 }, // PGAMPK8
+ { 0xc1, 0x0a }, // PGAMPK9
+ // Set gray scale voltage to adjust gamma
+ { 0xd0, 0x0c }, // NGAMVR0
+	{ 0xd1, 0x17 }, // NGAMVR1
+	{ 0xd2, 0x14 }, // NGAMVR2
+	{ 0xd3, 0x2e }, // NGAMVR3
+	{ 0xd4, 0x32 }, // NGAMVR4
+	{ 0xd5, 0x3c }, // NGAMVR5
+ { 0xd6, 0x22 }, // NGAMPR0
+ { 0xd7, 0x3d }, // NGAMPR1
+ { 0xd8, 0x0d }, // NGAMPK0
+ { 0xd9, 0x07 }, // NGAMPK1
+ { 0xda, 0x13 }, // NGAMPK2
+ { 0xdb, 0x13 }, // NGAMPK3
+ { 0xdc, 0x11 }, // NGAMPK4
+ { 0xdd, 0x15 }, // NGAMPK5
+ { 0xde, 0x19 }, // NGAMPK6
+ { 0xdf, 0x10 }, // NGAMPK7
+ { 0xe0, 0x17 }, // NGAMPK8
+ { 0xe1, 0x0a }, // NGAMPK9
+ // EXTC Command set enable, select page 3
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x03 },
+ // Set various timing settings
+ { 0x00, 0x2a }, // GIP_VST_1
+ { 0x01, 0x2a }, // GIP_VST_2
+ { 0x02, 0x2a }, // GIP_VST_3
+ { 0x03, 0x2a }, // GIP_VST_4
+ { 0x04, 0x61 }, // GIP_VST_5
+ { 0x05, 0x80 }, // GIP_VST_6
+ { 0x06, 0xc7 }, // GIP_VST_7
+ { 0x07, 0x01 }, // GIP_VST_8
+ { 0x08, 0x03 }, // GIP_VST_9
+ { 0x09, 0x04 }, // GIP_VST_10
+ { 0x70, 0x22 }, // GIP_ECLK1
+ { 0x71, 0x80 }, // GIP_ECLK2
+ { 0x30, 0x2a }, // GIP_CLK_1
+ { 0x31, 0x2a }, // GIP_CLK_2
+ { 0x32, 0x2a }, // GIP_CLK_3
+ { 0x33, 0x2a }, // GIP_CLK_4
+ { 0x34, 0x61 }, // GIP_CLK_5
+ { 0x35, 0xc5 }, // GIP_CLK_6
+ { 0x36, 0x80 }, // GIP_CLK_7
+ { 0x37, 0x23 }, // GIP_CLK_8
+ { 0x40, 0x03 }, // GIP_CLKA_1
+ { 0x41, 0x04 }, // GIP_CLKA_2
+ { 0x42, 0x05 }, // GIP_CLKA_3
+ { 0x43, 0x06 }, // GIP_CLKA_4
+ { 0x44, 0x11 }, // GIP_CLKA_5
+ { 0x45, 0xe8 }, // GIP_CLKA_6
+ { 0x46, 0xe9 }, // GIP_CLKA_7
+ { 0x47, 0x11 }, // GIP_CLKA_8
+ { 0x48, 0xea }, // GIP_CLKA_9
+ { 0x49, 0xeb }, // GIP_CLKA_10
+ { 0x50, 0x07 }, // GIP_CLKB_1
+ { 0x51, 0x08 }, // GIP_CLKB_2
+ { 0x52, 0x09 }, // GIP_CLKB_3
+ { 0x53, 0x0a }, // GIP_CLKB_4
+ { 0x54, 0x11 }, // GIP_CLKB_5
+ { 0x55, 0xec }, // GIP_CLKB_6
+ { 0x56, 0xed }, // GIP_CLKB_7
+ { 0x57, 0x11 }, // GIP_CLKB_8
+ { 0x58, 0xef }, // GIP_CLKB_9
+ { 0x59, 0xf0 }, // GIP_CLKB_10
+ // Map internal GOA signals to GOA output pad
+ { 0xb1, 0x01 }, // PANELD2U2
+ { 0xb4, 0x15 }, // PANELD2U5
+ { 0xb5, 0x16 }, // PANELD2U6
+ { 0xb6, 0x09 }, // PANELD2U7
+ { 0xb7, 0x0f }, // PANELD2U8
+ { 0xb8, 0x0d }, // PANELD2U9
+ { 0xb9, 0x0b }, // PANELD2U10
+ { 0xba, 0x00 }, // PANELD2U11
+ { 0xc7, 0x02 }, // PANELD2U24
+ { 0xca, 0x17 }, // PANELD2U27
+ { 0xcb, 0x18 }, // PANELD2U28
+ { 0xcc, 0x0a }, // PANELD2U29
+ { 0xcd, 0x10 }, // PANELD2U30
+ { 0xce, 0x0e }, // PANELD2U31
+ { 0xcf, 0x0c }, // PANELD2U32
+ { 0xd0, 0x00 }, // PANELD2U33
+ // Map internal GOA signals to GOA output pad
+ { 0x81, 0x00 }, // PANELU2D2
+ { 0x84, 0x15 }, // PANELU2D5
+ { 0x85, 0x16 }, // PANELU2D6
+ { 0x86, 0x10 }, // PANELU2D7
+ { 0x87, 0x0a }, // PANELU2D8
+ { 0x88, 0x0c }, // PANELU2D9
+ { 0x89, 0x0e }, // PANELU2D10
+ { 0x8a, 0x02 }, // PANELU2D11
+ { 0x97, 0x00 }, // PANELU2D24
+ { 0x9a, 0x17 }, // PANELU2D27
+ { 0x9b, 0x18 }, // PANELU2D28
+ { 0x9c, 0x0f }, // PANELU2D29
+ { 0x9d, 0x09 }, // PANELU2D30
+ { 0x9e, 0x0b }, // PANELU2D31
+ { 0x9f, 0x0d }, // PANELU2D32
+ { 0xa0, 0x01 }, // PANELU2D33
+ // EXTC Command set enable, select page 2
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 },
+ // Unknown registers
+ { 0x01, 0x01 },
+ { 0x02, 0xda },
+ { 0x03, 0xba },
+ { 0x04, 0xa8 },
+ { 0x05, 0x9a },
+ { 0x06, 0x70 },
+ { 0x07, 0xff },
+ { 0x08, 0x91 },
+ { 0x09, 0x90 },
+ { 0x0a, 0xff },
+ { 0x0b, 0x8f },
+ { 0x0c, 0x60 },
+ { 0x0d, 0x58 },
+ { 0x0e, 0x48 },
+ { 0x0f, 0x38 },
+ { 0x10, 0x2b },
+ // EXTC Command set enable, select page 0
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x00 },
+ // Display Access Control
+ { 0x36, 0x0a }, // bgr = 1, ss = 1, gs = 0
+};
+
+static const struct nv3052c_reg fs035vg158_panel_regs[] = {
+ // EXTC Command set enable, select page 1
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x01 },
+ // Mostly unknown registers
+ { 0xe3, 0x00 },
+ { 0x40, 0x00 },
+ { 0x03, 0x40 },
+ { 0x04, 0x00 },
+ { 0x05, 0x03 },
+ { 0x08, 0x00 },
+ { 0x09, 0x07 },
+ { 0x0a, 0x01 },
+ { 0x0b, 0x32 },
+ { 0x0c, 0x32 },
+ { 0x0d, 0x0b },
+ { 0x0e, 0x00 },
+ { 0x23, 0x20 }, // RGB interface control: DE MODE PCLK-N
+ { 0x24, 0x0c },
+ { 0x25, 0x06 },
+ { 0x26, 0x14 },
+ { 0x27, 0x14 },
+	{ 0x38, 0x9c }, // VCOM_ADJ1, different to ltk035c5444t
+	{ 0x39, 0xa7 }, // VCOM_ADJ2, different to ltk035c5444t
+	{ 0x3a, 0x50 }, // VCOM_ADJ3, different to ltk035c5444t
+ { 0x28, 0x40 },
+ { 0x29, 0x01 },
+ { 0x2a, 0xdf },
+ { 0x49, 0x3c },
+	{ 0x91, 0x57 }, // EXTPW_CTRL2, different to ltk035c5444t
+	{ 0x92, 0x57 }, // EXTPW_CTRL3, different to ltk035c5444t
+ { 0xa0, 0x55 },
+ { 0xa1, 0x50 },
+ { 0xa4, 0x9c },
+ { 0xa7, 0x02 },
+ { 0xa8, 0x01 },
+ { 0xa9, 0x01 },
+ { 0xaa, 0xfc },
+ { 0xab, 0x28 },
+ { 0xac, 0x06 },
+ { 0xad, 0x06 },
+ { 0xae, 0x06 },
+ { 0xaf, 0x03 },
+ { 0xb0, 0x08 },
+ { 0xb1, 0x26 },
+ { 0xb2, 0x28 },
+ { 0xb3, 0x28 },
+	{ 0xb4, 0x03 }, // Unknown, different to ltk035c5444t
+ { 0xb5, 0x08 },
+ { 0xb6, 0x26 },
+ { 0xb7, 0x08 },
+ { 0xb8, 0x26 },
+ { 0xf0, 0x00 },
+ { 0xf6, 0xc0 },
+	// EXTC Command set enable, select page 2
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 },
+ // Set gray scale voltage to adjust gamma
+ { 0xb0, 0x0b }, // PGAMVR0
+ { 0xb1, 0x16 }, // PGAMVR1
+ { 0xb2, 0x17 }, // PGAMVR2
+ { 0xb3, 0x2c }, // PGAMVR3
+ { 0xb4, 0x32 }, // PGAMVR4
+ { 0xb5, 0x3b }, // PGAMVR5
+ { 0xb6, 0x29 }, // PGAMPR0
+ { 0xb7, 0x40 }, // PGAMPR1
+ { 0xb8, 0x0d }, // PGAMPK0
+ { 0xb9, 0x05 }, // PGAMPK1
+ { 0xba, 0x12 }, // PGAMPK2
+ { 0xbb, 0x10 }, // PGAMPK3
+ { 0xbc, 0x12 }, // PGAMPK4
+ { 0xbd, 0x15 }, // PGAMPK5
+ { 0xbe, 0x19 }, // PGAMPK6
+ { 0xbf, 0x0e }, // PGAMPK7
+ { 0xc0, 0x16 }, // PGAMPK8
+ { 0xc1, 0x0a }, // PGAMPK9
+ // Set gray scale voltage to adjust gamma
+ { 0xd0, 0x0c }, // NGAMVR0
+	{ 0xd1, 0x17 }, // NGAMVR1
+	{ 0xd2, 0x14 }, // NGAMVR2
+	{ 0xd3, 0x2e }, // NGAMVR3
+	{ 0xd4, 0x32 }, // NGAMVR4
+	{ 0xd5, 0x3c }, // NGAMVR5
+ { 0xd6, 0x22 }, // NGAMPR0
+ { 0xd7, 0x3d }, // NGAMPR1
+ { 0xd8, 0x0d }, // NGAMPK0
+ { 0xd9, 0x07 }, // NGAMPK1
+ { 0xda, 0x13 }, // NGAMPK2
+ { 0xdb, 0x13 }, // NGAMPK3
+ { 0xdc, 0x11 }, // NGAMPK4
+ { 0xdd, 0x15 }, // NGAMPK5
+ { 0xde, 0x19 }, // NGAMPK6
+ { 0xdf, 0x10 }, // NGAMPK7
+ { 0xe0, 0x17 }, // NGAMPK8
+ { 0xe1, 0x0a }, // NGAMPK9
+ // EXTC Command set enable, select page 3
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x03 },
+ // Set various timing settings
+ { 0x00, 0x2a }, // GIP_VST_1
+ { 0x01, 0x2a }, // GIP_VST_2
+ { 0x02, 0x2a }, // GIP_VST_3
+ { 0x03, 0x2a }, // GIP_VST_4
+ { 0x04, 0x61 }, // GIP_VST_5
+ { 0x05, 0x80 }, // GIP_VST_6
+ { 0x06, 0xc7 }, // GIP_VST_7
+ { 0x07, 0x01 }, // GIP_VST_8
+ { 0x08, 0x03 }, // GIP_VST_9
+ { 0x09, 0x04 }, // GIP_VST_10
+ { 0x70, 0x22 }, // GIP_ECLK1
+ { 0x71, 0x80 }, // GIP_ECLK2
+ { 0x30, 0x2a }, // GIP_CLK_1
+ { 0x31, 0x2a }, // GIP_CLK_2
+ { 0x32, 0x2a }, // GIP_CLK_3
+ { 0x33, 0x2a }, // GIP_CLK_4
+ { 0x34, 0x61 }, // GIP_CLK_5
+ { 0x35, 0xc5 }, // GIP_CLK_6
+ { 0x36, 0x80 }, // GIP_CLK_7
+ { 0x37, 0x23 }, // GIP_CLK_8
+ { 0x40, 0x03 }, // GIP_CLKA_1
+ { 0x41, 0x04 }, // GIP_CLKA_2
+ { 0x42, 0x05 }, // GIP_CLKA_3
+ { 0x43, 0x06 }, // GIP_CLKA_4
+ { 0x44, 0x11 }, // GIP_CLKA_5
+ { 0x45, 0xe8 }, // GIP_CLKA_6
+ { 0x46, 0xe9 }, // GIP_CLKA_7
+ { 0x47, 0x11 }, // GIP_CLKA_8
+ { 0x48, 0xea }, // GIP_CLKA_9
+ { 0x49, 0xeb }, // GIP_CLKA_10
+ { 0x50, 0x07 }, // GIP_CLKB_1
+ { 0x51, 0x08 }, // GIP_CLKB_2
+ { 0x52, 0x09 }, // GIP_CLKB_3
+ { 0x53, 0x0a }, // GIP_CLKB_4
+ { 0x54, 0x11 }, // GIP_CLKB_5
+ { 0x55, 0xec }, // GIP_CLKB_6
+ { 0x56, 0xed }, // GIP_CLKB_7
+ { 0x57, 0x11 }, // GIP_CLKB_8
+ { 0x58, 0xef }, // GIP_CLKB_9
+ { 0x59, 0xf0 }, // GIP_CLKB_10
+ // Map internal GOA signals to GOA output pad
+ { 0xb1, 0x01 }, // PANELD2U2
+ { 0xb4, 0x15 }, // PANELD2U5
+ { 0xb5, 0x16 }, // PANELD2U6
+ { 0xb6, 0x09 }, // PANELD2U7
+ { 0xb7, 0x0f }, // PANELD2U8
+ { 0xb8, 0x0d }, // PANELD2U9
+ { 0xb9, 0x0b }, // PANELD2U10
+ { 0xba, 0x00 }, // PANELD2U11
+ { 0xc7, 0x02 }, // PANELD2U24
+ { 0xca, 0x17 }, // PANELD2U27
+ { 0xcb, 0x18 }, // PANELD2U28
+ { 0xcc, 0x0a }, // PANELD2U29
+ { 0xcd, 0x10 }, // PANELD2U30
+ { 0xce, 0x0e }, // PANELD2U31
+ { 0xcf, 0x0c }, // PANELD2U32
+ { 0xd0, 0x00 }, // PANELD2U33
+ // Map internal GOA signals to GOA output pad
+ { 0x81, 0x00 }, // PANELU2D2
+ { 0x84, 0x15 }, // PANELU2D5
+ { 0x85, 0x16 }, // PANELU2D6
+ { 0x86, 0x10 }, // PANELU2D7
+ { 0x87, 0x0a }, // PANELU2D8
+ { 0x88, 0x0c }, // PANELU2D9
+ { 0x89, 0x0e }, // PANELU2D10
+ { 0x8a, 0x02 }, // PANELU2D11
+ { 0x97, 0x00 }, // PANELU2D24
+ { 0x9a, 0x17 }, // PANELU2D27
+ { 0x9b, 0x18 }, // PANELU2D28
+ { 0x9c, 0x0f }, // PANELU2D29
+ { 0x9d, 0x09 }, // PANELU2D30
+ { 0x9e, 0x0b }, // PANELU2D31
+ { 0x9f, 0x0d }, // PANELU2D32
+ { 0xa0, 0x01 }, // PANELU2D33
+ // EXTC Command set enable, select page 2
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 },
+ // Unknown registers
{ 0x01, 0x01 },
{ 0x02, 0xda },
{ 0x03, 0xba },
@@ -227,10 +427,10 @@ static const struct nv3052c_reg nv3052c_panel_regs[] = {
{ 0x0e, 0x48 },
{ 0x0f, 0x38 },
{ 0x10, 0x2b },
- { 0xff, 0x30 },
- { 0xff, 0x52 },
- { 0xff, 0x00 },
- { 0x36, 0x0a },
+ // EXTC Command set enable, select page 0
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x00 },
+ // Display Access Control
+ { 0x36, 0x0a }, // bgr = 1, ss = 1, gs = 0
};
static inline struct nv3052c *to_nv3052c(struct drm_panel *panel)
@@ -241,6 +441,8 @@ static inline struct nv3052c *to_nv3052c(struct drm_panel *panel)
static int nv3052c_prepare(struct drm_panel *panel)
{
struct nv3052c *priv = to_nv3052c(panel);
+ const struct nv3052c_reg *panel_regs = priv->panel_info->panel_regs;
+ unsigned int panel_regs_len = priv->panel_info->panel_regs_len;
struct mipi_dbi *dbi = &priv->dbi;
unsigned int i;
int err;
@@ -257,9 +459,9 @@ static int nv3052c_prepare(struct drm_panel *panel)
gpiod_set_value_cansleep(priv->reset_gpio, 0);
usleep_range(5000, 20000);
- for (i = 0; i < ARRAY_SIZE(nv3052c_panel_regs); i++) {
- err = mipi_dbi_command(dbi, nv3052c_panel_regs[i].cmd,
- nv3052c_panel_regs[i].val);
+ for (i = 0; i < panel_regs_len; i++) {
+ err = mipi_dbi_command(dbi, panel_regs[i].cmd,
+ panel_regs[i].val);
if (err) {
dev_err(priv->dev, "Unable to set register: %d\n", err);
@@ -453,6 +655,21 @@ static const struct drm_display_mode ltk035c5444t_modes[] = {
},
};
+static const struct drm_display_mode fs035vg158_modes[] = {
+ { /* 60 Hz */
+ .clock = 21000,
+ .hdisplay = 640,
+ .hsync_start = 640 + 34,
+ .hsync_end = 640 + 34 + 4,
+ .htotal = 640 + 34 + 4 + 20,
+ .vdisplay = 480,
+ .vsync_start = 480 + 12,
+ .vsync_end = 480 + 12 + 4,
+ .vtotal = 480 + 12 + 4 + 6,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
static const struct nv3052c_panel_info ltk035c5444t_panel_info = {
.display_modes = ltk035c5444t_modes,
.num_modes = ARRAY_SIZE(ltk035c5444t_modes),
@@ -460,10 +677,31 @@ static const struct nv3052c_panel_info ltk035c5444t_panel_info = {
.height_mm = 64,
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .panel_regs = ltk035c5444t_panel_regs,
+ .panel_regs_len = ARRAY_SIZE(ltk035c5444t_panel_regs),
+};
+
+static const struct nv3052c_panel_info fs035vg158_panel_info = {
+ .display_modes = fs035vg158_modes,
+ .num_modes = ARRAY_SIZE(fs035vg158_modes),
+ .width_mm = 70,
+ .height_mm = 53,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .panel_regs = fs035vg158_panel_regs,
+ .panel_regs_len = ARRAY_SIZE(fs035vg158_panel_regs),
+};
+
+static const struct spi_device_id nv3052c_ids[] = {
+ { "ltk035c5444t", },
+ { "fs035vg158", },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(spi, nv3052c_ids);
static const struct of_device_id nv3052c_of_match[] = {
{ .compatible = "leadtek,ltk035c5444t", .data = &ltk035c5444t_panel_info },
+ { .compatible = "fascontek,fs035vg158", .data = &fs035vg158_panel_info },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nv3052c_of_match);
@@ -473,6 +711,7 @@ static struct spi_driver nv3052c_driver = {
.name = "nv3052c",
.of_match_table = nv3052c_of_match,
},
+ .id_table = nv3052c_ids,
.probe = nv3052c_probe,
.remove = nv3052c_remove,
};
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 9b9a7eb1bc60..a189ce236328 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -1254,9 +1254,9 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n");
pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info);
- if (!pinfo->dsi[1]) {
+ if (IS_ERR(pinfo->dsi[1])) {
dev_err(dev, "cannot get secondary DSI device\n");
- return -ENODEV;
+ return PTR_ERR(pinfo->dsi[1]);
}
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 8017ad33cf18..2214cb09678c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1134,6 +1134,37 @@ static const struct panel_desc auo_g133han01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing auo_g156han04_timings = {
+ .pixelclock = { 137000000, 141000000, 146000000 },
+ .hactive = { 1920, 1920, 1920 },
+ .hfront_porch = { 60, 60, 60 },
+ .hback_porch = { 90, 92, 111 },
+ .hsync_len = { 32, 32, 32 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 12, 12, 12 },
+ .vback_porch = { 24, 36, 56 },
+ .vsync_len = { 8, 8, 8 },
+};
+
+static const struct panel_desc auo_g156han04 = {
+ .timings = &auo_g156han04_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 344,
+ .height = 194,
+ },
+ .delay = {
+ .prepare = 50, /* T2 */
+ .enable = 200, /* T3 */
+ .disable = 110, /* T10 */
+ .unprepare = 1000, /* T13 */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode auo_g156xtn01_mode = {
.clock = 76000,
.hdisplay = 1366,
@@ -4289,6 +4320,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,g133han01",
.data = &auo_g133han01,
}, {
+ .compatible = "auo,g156han04",
+ .data = &auo_g156han04,
+ }, {
.compatible = "auo,g156xtn01",
.data = &auo_g156xtn01,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 0459965e1b4f..421eb4592b61 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -288,7 +288,7 @@ static void st7701_init_sequence(struct st7701 *st7701)
FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK,
DIV_ROUND_CLOSEST(desc->avdd_mv - 6200, 200)) |
FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK,
- DIV_ROUND_CLOSEST(-4400 + desc->avcl_mv, 200)));
+ DIV_ROUND_CLOSEST(-4400 - desc->avcl_mv, 200)));
/* T2D = 0.2us * T2D[3:0] */
ST7701_DSI(st7701, DSI_CMD2_BK1_SPD1,
@@ -423,6 +423,42 @@ static void kd50t048a_gip_sequence(struct st7701 *st7701)
0xFF, 0xFF, 0xFF, 0xFF, 0x10, 0x45, 0x67, 0x98, 0xBA);
}
+static void rg_arc_gip_sequence(struct st7701 *st7701)
+{
+ st7701_switch_cmd_bkx(st7701, true, 3);
+ ST7701_DSI(st7701, 0xEF, 0x08);
+ st7701_switch_cmd_bkx(st7701, true, 0);
+ ST7701_DSI(st7701, 0xC7, 0x04);
+ ST7701_DSI(st7701, 0xCC, 0x38);
+ st7701_switch_cmd_bkx(st7701, true, 1);
+ ST7701_DSI(st7701, 0xB9, 0x10);
+ ST7701_DSI(st7701, 0xBC, 0x03);
+ ST7701_DSI(st7701, 0xC0, 0x89);
+ ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
+ ST7701_DSI(st7701, 0xE1, 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00,
+ 0x00, 0x00, 0x20, 0x20);
+ ST7701_DSI(st7701, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xE3, 0x00, 0x00, 0x33, 0x00);
+ ST7701_DSI(st7701, 0xE4, 0x22, 0x00);
+ ST7701_DSI(st7701, 0xE5, 0x04, 0x5C, 0xA0, 0xA0, 0x06, 0x5C, 0xA0,
+ 0xA0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xE6, 0x00, 0x00, 0x33, 0x00);
+ ST7701_DSI(st7701, 0xE7, 0x22, 0x00);
+ ST7701_DSI(st7701, 0xE8, 0x05, 0x5C, 0xA0, 0xA0, 0x07, 0x5C, 0xA0,
+ 0xA0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xEB, 0x02, 0x00, 0x40, 0x40, 0x00, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xEC, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xED, 0xFA, 0x45, 0x0B, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xB0, 0x54, 0xAF);
+ ST7701_DSI(st7701, 0xEF, 0x08, 0x08, 0x08, 0x45, 0x3F, 0x54);
+ st7701_switch_cmd_bkx(st7701, false, 0);
+ ST7701_DSI(st7701, MIPI_DCS_SET_ADDRESS_MODE, 0x17);
+ ST7701_DSI(st7701, MIPI_DCS_SET_PIXEL_FORMAT, 0x77);
+ ST7701_DSI(st7701, MIPI_DCS_EXIT_SLEEP_MODE, 0x00);
+ msleep(120);
+}
+
static int st7701_prepare(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
@@ -839,6 +875,105 @@ static const struct st7701_panel_desc kd50t048a_desc = {
.gip_sequence = kd50t048a_gip_sequence,
};
+static const struct drm_display_mode rg_arc_mode = {
+ .clock = 25600,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 60,
+ .hsync_end = 480 + 60 + 42,
+ .htotal = 480 + 60 + 42 + 60,
+
+ .vdisplay = 640,
+ .vsync_start = 640 + 10,
+ .vsync_end = 640 + 10 + 4,
+ .vtotal = 640 + 10 + 4 + 16,
+
+ .width_mm = 63,
+ .height_mm = 84,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct st7701_panel_desc rg_arc_desc = {
+ .mode = &rg_arc_mode,
+ .lanes = 2,
+ .format = MIPI_DSI_FMT_RGB888,
+ .panel_sleep_delay = 80,
+
+ .pv_gamma = {
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0x01) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0x16),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1d),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0e),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x12),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x06),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x0c),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x0a),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x09),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x25),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x00),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x03),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x00),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x3f),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3f),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1c)
+ },
+ .nv_gamma = {
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0x01) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0x16),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1e),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0e),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x06),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x0c),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x08),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x09),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x26),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x00),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x15),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x00),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x3f),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3f),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1c)
+ },
+ .nlinv = 0,
+ .vop_uv = 4500000,
+ .vcom_uv = 762500,
+ .vgh_mv = 15000,
+ .vgl_mv = -9510,
+ .avdd_mv = 6600,
+ .avcl_mv = -4400,
+ .gamma_op_bias = OP_BIAS_MIDDLE,
+ .input_op_bias = OP_BIAS_MIN,
+ .output_op_bias = OP_BIAS_MIN,
+ .t2d_ns = 1600,
+ .t3d_ns = 10400,
+ .eot_en = true,
+ .gip_sequence = rg_arc_gip_sequence,
+};
+
static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
{
const struct st7701_panel_desc *desc;
@@ -917,6 +1052,7 @@ static void st7701_dsi_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id st7701_of_match[] = {
+ { .compatible = "anbernic,rg-arc-panel", .data = &rg_arc_desc },
{ .compatible = "densitron,dmt028vghmcmi-1a", .data = &dmt028vghmcmi_1a_desc },
{ .compatible = "elida,kd50t048a", .data = &kd50t048a_desc },
{ .compatible = "techstar,ts8550b", .data = &ts8550b_desc },
diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
new file mode 100644
index 000000000000..169c629746c7
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synaptics R63353 Controller driver
+ *
+ * Copyright (C) 2020 BSH Hausgerate GmbH
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/media-bus-format.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#define R63353_INSTR(...) { \
+ .len = sizeof((u8[]) {__VA_ARGS__}), \
+ .data = (u8[]){__VA_ARGS__} \
+ }
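+
+/*
+ * For illustration: R63353_INSTR(0x51, 0xff) expands to
+ * { .len = 2, .data = (u8[]){ 0x51, 0xff } }, so each anonymous
+ * compound literal carries both the command payload and its length
+ * and the init tables below stay purely declarative.
+ */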
+
+struct r63353_instr {
+ size_t len;
+ const u8 *data;
+};
+
+static const struct r63353_instr sharp_ls068b3sx02_init[] = {
+ R63353_INSTR(0x51, 0xff),
+ R63353_INSTR(0x53, 0x0c),
+ R63353_INSTR(0x55, 0x00),
+ R63353_INSTR(0x84, 0x00),
+ R63353_INSTR(0x29),
+};
+
+struct r63353_desc {
+ const char *name;
+ const struct r63353_instr *init;
+ const size_t init_length;
+ const struct drm_display_mode *mode;
+ u32 width_mm;
+ u32 height_mm;
+};
+
+struct r63353_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *dsi;
+
+ struct gpio_desc *reset_gpio;
+ struct regulator *dvdd;
+ struct regulator *avdd;
+
+ struct r63353_desc *pdata;
+};
+
+static inline struct r63353_panel *to_r63353_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct r63353_panel, base);
+}
+
+static int r63353_panel_power_on(struct r63353_panel *rpanel)
+{
+ struct mipi_dsi_device *dsi = rpanel->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = regulator_enable(rpanel->avdd);
+ if (ret) {
+ dev_err(dev, "Failed to enable avdd regulator (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(15000, 25000);
+
+ ret = regulator_enable(rpanel->dvdd);
+ if (ret) {
+ dev_err(dev, "Failed to enable dvdd regulator (%d)\n", ret);
+ regulator_disable(rpanel->avdd);
+ return ret;
+ }
+
+ usleep_range(300000, 350000);
+ gpiod_set_value(rpanel->reset_gpio, 1);
+ usleep_range(15000, 25000);
+
+ return 0;
+}
+
+static int r63353_panel_power_off(struct r63353_panel *rpanel)
+{
+ gpiod_set_value(rpanel->reset_gpio, 0);
+ regulator_disable(rpanel->dvdd);
+ regulator_disable(rpanel->avdd);
+
+ return 0;
+}
+
+static int r63353_panel_activate(struct r63353_panel *rpanel)
+{
+ struct mipi_dsi_device *dsi = rpanel->dsi;
+ struct device *dev = &dsi->dev;
+ int i, ret;
+
+ ret = mipi_dsi_dcs_soft_reset(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to do Software Reset (%d)\n", ret);
+ goto fail;
+ }
+
+ usleep_range(15000, 17000);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
+ goto fail;
+ }
+
+ for (i = 0; i < rpanel->pdata->init_length; i++) {
+ const struct r63353_instr *instr = &rpanel->pdata->init[i];
+
+ ret = mipi_dsi_dcs_write_buffer(dsi, instr->data, instr->len);
+ if (ret < 0)
+ goto fail;
+ }
+
+ msleep(120);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode (%d)\n", ret);
+ goto fail;
+ }
+
+ usleep_range(5000, 10000);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display ON (%d)\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ gpiod_set_value(rpanel->reset_gpio, 0);
+
+ return ret;
+}
+
+static int r63353_panel_prepare(struct drm_panel *panel)
+{
+ struct r63353_panel *rpanel = to_r63353_panel(panel);
+ struct mipi_dsi_device *dsi = rpanel->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dev_dbg(dev, "Preparing\n");
+
+ ret = r63353_panel_power_on(rpanel);
+ if (ret)
+ return ret;
+
+ ret = r63353_panel_activate(rpanel);
+ if (ret) {
+ r63353_panel_power_off(rpanel);
+ return ret;
+ }
+
+ dev_dbg(dev, "Prepared\n");
+ return 0;
+}
+
+static int r63353_panel_deactivate(struct r63353_panel *rpanel)
+{
+ struct mipi_dsi_device *dsi = rpanel->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display OFF (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(5000, 10000);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int r63353_panel_unprepare(struct drm_panel *panel)
+{
+ struct r63353_panel *rpanel = to_r63353_panel(panel);
+
+ r63353_panel_deactivate(rpanel);
+ r63353_panel_power_off(rpanel);
+
+ return 0;
+}
+
+static const struct drm_display_mode sharp_ls068b3sx02_timing = {
+ .clock = 70000,
+ .hdisplay = 640,
+ .hsync_start = 640 + 35,
+ .hsync_end = 640 + 35 + 2,
+ .htotal = 640 + 35 + 2 + 150,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 2,
+ .vsync_end = 1280 + 2 + 4,
+ .vtotal = 1280 + 2 + 4 + 0,
+};
+
+static int r63353_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct r63353_panel *rpanel = to_r63353_panel(panel);
+ struct drm_display_mode *mode;
+ static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ mode = drm_mode_duplicate(connector->dev, rpanel->pdata->mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = rpanel->pdata->width_mm;
+ connector->display_info.height_mm = rpanel->pdata->height_mm;
+
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs r63353_panel_funcs = {
+ .prepare = r63353_panel_prepare,
+ .unprepare = r63353_panel_unprepare,
+ .get_modes = r63353_panel_get_modes,
+};
+
+static int r63353_panel_probe(struct mipi_dsi_device *dsi)
+{
+ int ret = 0;
+ struct device *dev = &dsi->dev;
+ struct r63353_panel *panel;
+
+ panel = devm_kzalloc(&dsi->dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, panel);
+ panel->dsi = dsi;
+ panel->pdata = (struct r63353_desc *)of_device_get_match_data(dev);
+
+ dev_info(dev, "Panel %s\n", panel->pdata->name);
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_NO_EOT_PACKET;
+
+ panel->dvdd = devm_regulator_get(dev, "dvdd");
+ if (IS_ERR(panel->dvdd))
+ return PTR_ERR(panel->dvdd);
+ panel->avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(panel->avdd))
+ return PTR_ERR(panel->avdd);
+
+ panel->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(panel->reset_gpio)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(panel->reset_gpio);
+ }
+
+ drm_panel_init(&panel->base, dev, &r63353_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ panel->base.prepare_prev_first = true;
+ ret = drm_panel_of_backlight(&panel->base);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&panel->base);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&panel->base);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void r63353_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct r63353_panel *rpanel = mipi_dsi_get_drvdata(dsi);
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(dev, "Failed to detach from host (%d)\n", ret);
+
+ drm_panel_remove(&rpanel->base);
+}
+
+static void r63353_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct r63353_panel *rpanel = mipi_dsi_get_drvdata(dsi);
+
+ r63353_panel_unprepare(&rpanel->base);
+}
+
+static const struct r63353_desc sharp_ls068b3sx02_data = {
+ .name = "Sharp LS068B3SX02",
+ .mode = &sharp_ls068b3sx02_timing,
+ .init = sharp_ls068b3sx02_init,
+ .init_length = ARRAY_SIZE(sharp_ls068b3sx02_init),
+ .width_mm = 68,
+ .height_mm = 159,
+};
+
+static const struct of_device_id r63353_of_match[] = {
+ { .compatible = "sharp,ls068b3sx02", .data = &sharp_ls068b3sx02_data },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, r63353_of_match);
+
+static struct mipi_dsi_driver r63353_panel_driver = {
+ .driver = {
+ .name = "r63353-dsi",
+ .of_match_table = r63353_of_match,
+ },
+ .probe = r63353_panel_probe,
+ .remove = r63353_panel_remove,
+ .shutdown = r63353_panel_shutdown,
+};
+
+module_mipi_dsi_driver(r63353_panel_driver);
+
+MODULE_AUTHOR("Matthias Proske <Matthias.Proske@bshg.com>");
+MODULE_AUTHOR("Michael Trimarchi <michael@amarulasolutions.com>");
+MODULE_DESCRIPTION("Synaptics R63353 Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index f59c82ea8870..2d30da38c2c3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -29,14 +29,20 @@ static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfr
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
+ struct panfrost_device *ptdev = dev_get_drvdata(dev);
struct dev_pm_opp *opp;
+ int err;
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
dev_pm_opp_put(opp);
- return dev_pm_opp_set_rate(dev, *freq);
+ err = dev_pm_opp_set_rate(dev, *freq);
+ if (!err)
+ ptdev->pfdevfreq.current_frequency = *freq;
+
+ return err;
}
static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq)
@@ -58,7 +64,6 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
spin_lock_irqsave(&pfdevfreq->lock, irqflags);
panfrost_devfreq_update_utilization(pfdevfreq);
- pfdevfreq->current_frequency = status->current_frequency;
status->total_time = ktime_to_ns(ktime_add(pfdevfreq->busy_time,
pfdevfreq->idle_time));
@@ -165,6 +170,14 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
panfrost_devfreq_profile.initial_freq = cur_freq;
/*
+ * We could wait until panfrost_devfreq_target() to set this value, but
+ * since the simple_ondemand governor works asynchronously, there's a
+ * chance that, by the time someone opens the device's fdinfo file, the
+ * current frequency hasn't been updated yet, so let's just do an early set.
+ */
+ pfdevfreq->current_frequency = cur_freq;
+
+ /*
* Set the recommended OPP; this will enable and configure the regulator
* if any, and will avoid a switch off by regulator_late_cleanup()
*/
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 0cf64456e29a..d47b40b82b0b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -200,7 +200,7 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
enum drm_gem_object_status res = 0;
- if (bo->base.pages)
+ if (bo->base.base.import_attach || bo->base.pages)
res |= DRM_GEM_OBJECT_RESIDENT;
if (bo->base.madv == PANFROST_MADV_DONTNEED)
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 4aca09cab4b8..6e537c5bd295 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -29,6 +29,7 @@
#include <linux/pci.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_file.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index e8fe239b9d79..324e9b765098 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -21,6 +21,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/hdmi.h>
+#include <drm/drm_edid.h>
#include "radeon.h"
#include "radeon_asic.h"
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 4a1d5447eac1..4c06f47453fd 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -21,6 +21,7 @@
*
*/
#include <linux/hdmi.h>
+#include <drm/drm_edid.h>
#include "dce6_afmt.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f0ae087be914..a424b86008b8 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
+#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 5f3078f8ab95..681119c91d94 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -26,6 +26,7 @@
*/
#include <linux/hdmi.h>
+#include <drm/drm_edid.h>
#include <drm/radeon_drm.h>
#include "evergreen_hdmi.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 85c4bb186203..3596ea4a8b60 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 279bf130a18c..91b58fbc2be7 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -27,6 +27,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_eld.h>
+#include <drm/drm_edid.h>
#include "dce6_afmt.h"
#include "evergreen_hdmi.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 05e67867469b..dacaaa007051 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -27,7 +27,9 @@
#include <linux/types.h>
-#define RREG32_ENDPOINT(block, reg) \
+struct cea_sad;
+
+#define RREG32_ENDPOINT(block, reg) \
radeon_audio_endpoint_rreg(rdev, (block), (reg))
#define WREG32_ENDPOINT(block, reg, v) \
radeon_audio_endpoint_wreg(rdev, (block), (reg), (v))
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 2620efc7c675..6952b1273b0f 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -28,6 +28,7 @@
#include <linux/pci.h>
#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 901e75ec70ff..efd18c8d84c8 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -687,11 +687,16 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
if (radeon_crtc == NULL)
return;
+ radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
+ if (!radeon_crtc->flip_queue) {
+ kfree(radeon_crtc);
+ return;
+ }
+
drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
radeon_crtc->crtc_id = index;
- radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
rdev->mode_info.crtcs[index] = radeon_crtc;
if (rdev->family >= CHIP_BONAIRE) {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 9cb6401fe97e..3de3dce9e89d 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
+#include <drm/drm_edid.h>
#include <drm/drm_device.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 314d066e68e9..3d174390a8af 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -918,7 +918,6 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.class = I2C_CLASS_DDC;
i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 1decdcec0264..59c4db13d90a 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -32,13 +32,13 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fixed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+struct edid;
struct radeon_bo;
struct radeon_device;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index e6534fa9f1fb..38048593bb4a 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -413,6 +413,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
+ radeon_debugfs_ring_init(rdev, ring);
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->ring_free_dw = ring->ring_size / 4;
@@ -421,7 +422,6 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
}
- radeon_debugfs_ring_init(rdev, ring);
radeon_ring_lockup_update(rdev, ring);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 987cabbf1318..c38b4d5d6a14 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -1204,13 +1204,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
r = radeon_bo_create(rdev, pd_size, align, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
NULL, &vm->page_directory);
- if (r)
+ if (r) {
+ kfree(vm->page_tables);
+ vm->page_tables = NULL;
return r;
-
+ }
r = radeon_vm_clear_bo(rdev, vm->page_directory);
if (r) {
radeon_bo_unref(&vm->page_directory);
vm->page_directory = NULL;
+ kfree(vm->page_tables);
+ vm->page_tables = NULL;
return r;
}
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index f74f381af05f..d49c145db437 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1493,8 +1493,10 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
- if (!rdev->pm.power_state[i].clock_info)
+ if (!rdev->pm.power_state[i].clock_info) {
+ kfree(rdev->pm.dpm.ps);
return -EINVAL;
+ }
ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 08ea1c864cb2..ef1cc7bad20a 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1726,8 +1726,10 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
- if (!rdev->pm.power_state[i].clock_info)
+ if (!rdev->pm.power_state[i].clock_info) {
+ kfree(rdev->pm.dpm.ps);
return -EINVAL;
+ }
ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 84aa811ca1e9..bd08d57486fe 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -30,7 +30,6 @@
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
#define RK3288_GRF_SOC_CON6 0x25c
#define RK3288_EDP_LCDC_SEL BIT(5)
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 21254e4e107a..a855c45ae7f3 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -24,7 +24,6 @@
#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
-#include "rockchip_drm_vop.h"
static inline struct cdn_dp_device *connector_to_dp(struct drm_connector *connector)
{
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 6396f9324dab..4cc8ed8f4fbd 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -26,7 +26,6 @@
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
#define DSI_PHY_RSTZ 0xa0
#define PHY_DISFORCEPLL 0
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 341550199111..fe33092abbe7 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -18,7 +18,6 @@
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
#define RK3228_GRF_SOC_CON2 0x0408
#define RK3228_HDMI_SDAIN_MSK BIT(14)
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 6e5b922a121e..e6fbe040ccf6 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -23,7 +23,6 @@
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
#include "inno_hdmi.h"
@@ -793,7 +792,6 @@ static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi)
init_completion(&i2c->cmp);
adap = &i2c->adap;
- adap->class = I2C_CLASS_DDC;
adap->owner = THIS_MODULE;
adap->dev.parent = hdmi->dev;
adap->dev.of_node = hdmi->dev->of_node;
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index 7d561c5a650f..95cd1b49eda8 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -18,7 +18,6 @@
#include "rk3066_hdmi.h"
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
#define DEFAULT_PLLA_RATE 30000000
@@ -716,7 +715,6 @@ static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
init_completion(&i2c->cmpltn);
adap = &i2c->adap;
- adap->class = I2C_CLASS_DDC;
adap->owner = THIS_MODULE;
adap->dev.parent = hdmi->dev;
adap->dev.of_node = hdmi->dev->of_node;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index aeb03a57240f..bbb9e0bf6804 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -20,6 +20,23 @@
#define ROCKCHIP_MAX_CONNECTOR 2
#define ROCKCHIP_MAX_CRTC 4
+/*
+ * display output interfaces supported by the rockchip lcdc
+ */
+#define ROCKCHIP_OUT_MODE_P888 0
+#define ROCKCHIP_OUT_MODE_BT1120 0
+#define ROCKCHIP_OUT_MODE_P666 1
+#define ROCKCHIP_OUT_MODE_P565 2
+#define ROCKCHIP_OUT_MODE_BT656 5
+#define ROCKCHIP_OUT_MODE_S888 8
+#define ROCKCHIP_OUT_MODE_S888_DUMMY 12
+#define ROCKCHIP_OUT_MODE_YUV420 14
+/* for use with a special output interface */
+#define ROCKCHIP_OUT_MODE_AAAA 15
+
+/* output flags */
+#define ROCKCHIP_OUTPUT_DSI_DUAL BIT(0)
+
struct drm_device;
struct drm_connector;
struct iommu_domain;
@@ -31,6 +48,7 @@ struct rockchip_crtc_state {
int output_bpc;
int output_flags;
bool enable_afbc;
+ bool yuv_overlay;
u32 bus_format;
u32 bus_flags;
int color_space;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 4b2daefeb8c1..b33e5bdc26be 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -277,18 +277,6 @@ struct vop_data {
/* dst alpha ctrl define */
#define DST_FACTOR_M0(x) (((x) & 0x7) << 6)
-/*
- * display output interface supported by rockchip lcdc
- */
-#define ROCKCHIP_OUT_MODE_P888 0
-#define ROCKCHIP_OUT_MODE_P666 1
-#define ROCKCHIP_OUT_MODE_P565 2
-/* for use special outface */
-#define ROCKCHIP_OUT_MODE_AAAA 15
-
-/* output flags */
-#define ROCKCHIP_OUTPUT_DSI_DUAL BIT(0)
-
enum alpha_mode {
ALPHA_STRAIGHT,
ALPHA_INVERSE,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 312da5783362..85b3b4871a1d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -35,7 +35,6 @@
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
-#include "rockchip_drm_fb.h"
#include "rockchip_drm_vop2.h"
#include "rockchip_rgb.h"
@@ -190,7 +189,10 @@ struct vop2 {
void __iomem *regs;
struct regmap *map;
- struct regmap *grf;
+ struct regmap *sys_grf;
+ struct regmap *vop_grf;
+ struct regmap *vo1_grf;
+ struct regmap *sys_pmu;
/* physical map length of vop2 register */
u32 len;
@@ -209,6 +211,7 @@ struct vop2 {
unsigned int enable_count;
struct clk *hclk;
struct clk *aclk;
+ struct clk *pclk;
/* optional internal rgb encoder */
struct rockchip_rgb *rgb;
@@ -217,6 +220,25 @@ struct vop2 {
struct vop2_win win[];
};
+#define vop2_output_if_is_hdmi(x) ((x) == ROCKCHIP_VOP2_EP_HDMI0 || \
+ (x) == ROCKCHIP_VOP2_EP_HDMI1)
+
+#define vop2_output_if_is_dp(x) ((x) == ROCKCHIP_VOP2_EP_DP0 || \
+ (x) == ROCKCHIP_VOP2_EP_DP1)
+
+#define vop2_output_if_is_edp(x) ((x) == ROCKCHIP_VOP2_EP_EDP0 || \
+ (x) == ROCKCHIP_VOP2_EP_EDP1)
+
+#define vop2_output_if_is_mipi(x) ((x) == ROCKCHIP_VOP2_EP_MIPI0 || \
+ (x) == ROCKCHIP_VOP2_EP_MIPI1)
+
+#define vop2_output_if_is_lvds(x) ((x) == ROCKCHIP_VOP2_EP_LVDS0 || \
+ (x) == ROCKCHIP_VOP2_EP_LVDS1)
+
+#define vop2_output_if_is_dpi(x) ((x) == ROCKCHIP_VOP2_EP_RGB0)
+
+static const struct regmap_config vop2_regmap_config;
+
static struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc)
{
return container_of(crtc, struct vop2_video_port, crtc);
@@ -266,12 +288,23 @@ static bool vop2_cluster_window(const struct vop2_win *win)
return win->data->feature & WIN_FEATURE_CLUSTER;
}
+/*
+ * Note:
+ * The write-mask function is documented but missing on rk3566/8; writes
+ * to these bits have no effect there. For newer SoCs (rk3588 and
+ * following) the write mask is required for register writes.
+ *
+ * GLB_CFG_DONE_EN has no write-mask bit.
+ */
static void vop2_cfg_done(struct vop2_video_port *vp)
{
struct vop2 *vop2 = vp->vop2;
+ u32 val = RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN;
+
+ val |= BIT(vp->id) | (BIT(vp->id) << 16);
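+ /*
+ * Sketch for vp->id == 1: val becomes GLB_CFG_DONE_EN | BIT(1) |
+ * BIT(17), the upper halfword being the write mask that SoCs which
+ * honour it (rk3588 and later, per the note above) use to decide
+ * which low bits to latch.
+ */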
- regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE,
- BIT(vp->id) | RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN);
+ regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE, val);
}
static void vop2_win_disable(struct vop2_win *win)
@@ -462,6 +495,17 @@ static bool vop2_output_uv_swap(u32 bus_format, u32 output_mode)
return false;
}
+static bool vop2_output_rg_swap(struct vop2 *vop2, u32 bus_format)
+{
+ if (vop2->data->soc_id == 3588) {
+ if (bus_format == MEDIA_BUS_FMT_YUV8_1X24 ||
+ bus_format == MEDIA_BUS_FMT_YUV10_1X30)
+ return true;
+ }
+
+ return false;
+}
+
static bool is_yuv_output(u32 bus_format)
{
switch (bus_format) {
@@ -519,6 +563,18 @@ static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format,
return vop2_convert_afbc_format(format) >= 0;
}
+/*
+ * 0: full block mode, 16 lines per tile
+ * 1: half block mode, 8 lines per tile
+ */
+static bool vop2_half_block_enable(struct drm_plane_state *pstate)
+{
+ if (pstate->rotation & (DRM_MODE_ROTATE_270 | DRM_MODE_ROTATE_90))
+ return false;
+ else
+ return true;
+}
+
static u32 vop2_afbc_transform_offset(struct drm_plane_state *pstate,
bool afbc_half_block_en)
{
@@ -854,13 +910,32 @@ static int vop2_core_clks_prepare_enable(struct vop2 *vop2)
goto err;
}
+ ret = clk_prepare_enable(vop2->pclk);
+ if (ret < 0) {
+ drm_err(vop2->drm, "failed to enable pclk - %d\n", ret);
+ goto err1;
+ }
+
return 0;
+err1:
+ clk_disable_unprepare(vop2->aclk);
err:
clk_disable_unprepare(vop2->hclk);
return ret;
}
+static void rk3588_vop2_power_domain_enable_all(struct vop2 *vop2)
+{
+ u32 pd;
+
+ pd = vop2_readl(vop2, RK3588_SYS_PD_CTRL);
+ pd &= ~(VOP2_PD_CLUSTER0 | VOP2_PD_CLUSTER1 | VOP2_PD_CLUSTER2 |
+ VOP2_PD_CLUSTER3 | VOP2_PD_ESMART);
+
+ vop2_writel(vop2, RK3588_SYS_PD_CTRL, pd);
+}
+
static void vop2_enable(struct vop2 *vop2)
{
int ret;
@@ -883,11 +958,12 @@ static void vop2_enable(struct vop2 *vop2)
return;
}
- regcache_sync(vop2->map);
-
if (vop2->data->soc_id == 3566)
vop2_writel(vop2, RK3568_OTP_WIN_EN, 1);
+ if (vop2->data->soc_id == 3588)
+ rk3588_vop2_power_domain_enable_all(vop2);
+
vop2_writel(vop2, RK3568_REG_CFG_DONE, RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN);
/*
@@ -913,8 +989,9 @@ static void vop2_disable(struct vop2 *vop2)
pm_runtime_put_sync(vop2->dev);
- regcache_mark_dirty(vop2->map);
+ regcache_drop_region(vop2->map, 0, vop2_regmap_config.max_register);
+ clk_disable_unprepare(vop2->pclk);
clk_disable_unprepare(vop2->aclk);
clk_disable_unprepare(vop2->hclk);
}
@@ -1140,6 +1217,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
bool rotate_90 = pstate->rotation & DRM_MODE_ROTATE_90;
struct rockchip_gem_object *rk_obj;
unsigned long offset;
+ bool half_block_en;
bool afbc_en;
dma_addr_t yrgb_mst;
dma_addr_t uv_mst;
@@ -1232,6 +1310,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
dsp_info = (dsp_h - 1) << 16 | ((dsp_w - 1) & 0xffff);
format = vop2_convert_format(fb->format->format);
+ half_block_en = vop2_half_block_enable(pstate);
drm_dbg(vop2->drm, "vp%d update %s[%dx%d->%dx%d@%dx%d] fmt[%p4cc_%s] addr[%pad]\n",
vp->id, win->data->name, actual_w, actual_h, dsp_w, dsp_h,
@@ -1239,6 +1318,9 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
&fb->format->format,
afbc_en ? "AFBC" : "", &yrgb_mst);
+ if (vop2_cluster_window(win))
+ vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, half_block_en);
+
if (afbc_en) {
u32 stride;
@@ -1277,15 +1359,21 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
vop2_win_write(win, VOP2_WIN_AFBC_ENABLE, 1);
vop2_win_write(win, VOP2_WIN_AFBC_FORMAT, afbc_format);
vop2_win_write(win, VOP2_WIN_AFBC_UV_SWAP, uv_swap);
- vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 0);
+ /*
+ * On rk3566/8 this bit enables auto gating, but the
+ * feature does not work well, so we disable it on
+ * these two platforms.
+ * On rk3588 and the following new SoCs (rk3528/rk3576)
+ * the bit disables gating instead, so we must write 1
+ * to disable gating whenever AFBC is enabled.
+ */
+ if (vop2->data->soc_id == 3566 || vop2->data->soc_id == 3568)
+ vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 0);
+ else
+ vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 1);
+
vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 0);
- if (pstate->rotation & (DRM_MODE_ROTATE_270 | DRM_MODE_ROTATE_90)) {
- vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, 0);
- transform_offset = vop2_afbc_transform_offset(pstate, false);
- } else {
- vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, 1);
- transform_offset = vop2_afbc_transform_offset(pstate, true);
- }
+ transform_offset = vop2_afbc_transform_offset(pstate, half_block_en);
vop2_win_write(win, VOP2_WIN_AFBC_HDR_PTR, yrgb_mst);
vop2_win_write(win, VOP2_WIN_AFBC_PIC_SIZE, act_info);
vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, transform_offset);
@@ -1297,6 +1385,11 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
vop2_win_write(win, VOP2_WIN_AFBC_ROTATE_270, rotate_270);
vop2_win_write(win, VOP2_WIN_AFBC_ROTATE_90, rotate_90);
} else {
+ if (vop2_cluster_window(win)) {
+ vop2_win_write(win, VOP2_WIN_AFBC_ENABLE, 0);
+ vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, 0);
+ }
+
vop2_win_write(win, VOP2_WIN_YRGB_VIR, DIV_ROUND_UP(fb->pitches[0], 4));
}
@@ -1429,8 +1522,18 @@ static void vop2_post_config(struct drm_crtc *crtc)
u32 top_margin = 100, bottom_margin = 100;
u16 hsize = hdisplay * (left_margin + right_margin) / 200;
u16 vsize = vdisplay * (top_margin + bottom_margin) / 200;
+ u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
u16 hact_end, vact_end;
u32 val;
+ u32 bg_dly;
+ u32 pre_scan_dly;
+
+ bg_dly = vp->data->pre_scan_max_dly[3];
+ vop2_writel(vp->vop2, RK3568_VP_BG_MIX_CTRL(vp->id),
+ FIELD_PREP(RK3568_VP_BG_MIX_CTRL__BG_DLY, bg_dly));
+
+ pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len;
+ vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly);
vsize = rounddown(vsize, 2);
hsize = rounddown(hsize, 2);
@@ -1466,10 +1569,10 @@ static void vop2_post_config(struct drm_crtc *crtc)
vop2_vp_write(vp, RK3568_VP_DSP_BG, 0);
}
-static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
- u32 polflags)
+static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
{
struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
u32 die, dip;
die = vop2_readl(vop2, RK3568_DSP_IF_EN);
@@ -1483,9 +1586,9 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
if (polflags & POLFLAG_DCLK_INV)
- regmap_write(vop2->grf, RK3568_GRF_VO_CON1, BIT(3 + 16) | BIT(3));
+ regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16) | BIT(3));
else
- regmap_write(vop2->grf, RK3568_GRF_VO_CON1, BIT(3 + 16));
+ regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16));
break;
case ROCKCHIP_VOP2_EP_HDMI0:
die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
@@ -1531,13 +1634,280 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
break;
default:
drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
- return;
+ return 0;
}
dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD;
vop2_writel(vop2, RK3568_DSP_IF_EN, die);
vop2_writel(vop2, RK3568_DSP_IF_POL, dip);
+
+ return crtc->state->adjusted_mode.crtc_clock * 1000LL;
+}
+
+/*
+ * Calculate the dclk on rk3588;
+ * the available dclk dividers are 1, 2 and 4.
+ */
+static unsigned long rk3588_calc_dclk(unsigned long child_clk, unsigned long max_dclk)
+{
+ if (child_clk * 4 <= max_dclk)
+ return child_clk * 4;
+ else if (child_clk * 2 <= max_dclk)
+ return child_clk * 2;
+ else if (child_clk <= max_dclk)
+ return child_clk;
+ else
+ return 0;
+}
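+
+/*
+ * Worked example with assumed numbers: child_clk = 148500 kHz against
+ * max_dclk = 600000 kHz returns 594000 kHz (the x4 option), while a
+ * child clock above max_dclk returns 0, meaning no divider of 1/2/4 fits.
+ */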
+
+/*
+ * 4 pixclk/cycle on rk3588
+ * RGB/eDP/HDMI: if_pixclk >= dclk_core
+ * DP: dp_pixclk = dclk_out <= dclk_core
+ * DSI: mipi_pixclk <= dclk_out <= dclk_core
+ */
+static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
+ int *dclk_core_div, int *dclk_out_div,
+ int *if_pixclk_div, int *if_dclk_div)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
+ struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+ struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
+ int output_mode = vcstate->output_mode;
+ unsigned long v_pixclk = adjusted_mode->crtc_clock * 1000LL; /* video timing pixclk */
+ unsigned long dclk_core_rate = v_pixclk >> 2;
+ unsigned long dclk_rate = v_pixclk;
+ unsigned long dclk_out_rate;
+ unsigned long if_pixclk_rate;
+ int K = 1;
+
+ if (vop2_output_if_is_hdmi(id)) {
+ /*
+ * K = 2: dclk_core = if_pixclk_rate > if_dclk_rate
+ * K = 1: dclk_core = hdmie_edp_dclk > if_pixclk_rate
+ */
+ if (output_mode == ROCKCHIP_OUT_MODE_YUV420) {
+ dclk_rate = dclk_rate >> 1;
+ K = 2;
+ }
+
+ if_pixclk_rate = (dclk_core_rate << 1) / K;
+ /*
+ * if_dclk_rate = dclk_core_rate / K;
+ * *if_pixclk_div = dclk_rate / if_pixclk_rate;
+ * *if_dclk_div = dclk_rate / if_dclk_rate;
+ */
+ *if_pixclk_div = 2;
+ *if_dclk_div = 4;
+ } else if (vop2_output_if_is_edp(id)) {
+ /*
+ * edp_pixclk = edp_dclk > dclk_core
+ */
+ if_pixclk_rate = v_pixclk / K;
+ dclk_rate = if_pixclk_rate * K;
+ /*
+ * *if_pixclk_div = dclk_rate / if_pixclk_rate;
+ * *if_dclk_div = *if_pixclk_div;
+ */
+ *if_pixclk_div = K;
+ *if_dclk_div = K;
+ } else if (vop2_output_if_is_dp(id)) {
+ if (output_mode == ROCKCHIP_OUT_MODE_YUV420)
+ dclk_out_rate = v_pixclk >> 3;
+ else
+ dclk_out_rate = v_pixclk >> 2;
+
+ dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000);
+ if (!dclk_rate) {
+ drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld KHZ\n",
+ dclk_out_rate);
+ return 0;
+ }
+ *dclk_out_div = dclk_rate / dclk_out_rate;
+ } else if (vop2_output_if_is_mipi(id)) {
+ if_pixclk_rate = dclk_core_rate / K;
+ /*
+ * dclk_core = dclk_out * K = if_pixclk * K = v_pixclk / 4
+ */
+ dclk_out_rate = if_pixclk_rate;
+ /*
+ * dclk_rate = N * dclk_core_rate, N = (1, 2, 4);
+ * pick the largest factor that still fits.
+ */
+ dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000);
+ if (!dclk_rate) {
+ drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld KHZ\n",
+ dclk_out_rate);
+ return 0;
+ }
+ *dclk_out_div = dclk_rate / dclk_out_rate;
+ /*
+ * mipi pixclk == dclk_out
+ */
+ *if_pixclk_div = 1;
+ } else if (vop2_output_if_is_dpi(id)) {
+ dclk_rate = v_pixclk;
+ }
+
+ *dclk_core_div = dclk_rate / dclk_core_rate;
+ *if_pixclk_div = ilog2(*if_pixclk_div);
+ *if_dclk_div = ilog2(*if_dclk_div);
+ *dclk_core_div = ilog2(*dclk_core_div);
+ *dclk_out_div = ilog2(*dclk_out_div);
+
+ drm_dbg(vop2->drm, "dclk: %ld, pixclk_div: %d, dclk_div: %d\n",
+ dclk_rate, *if_pixclk_div, *if_dclk_div);
+
+ return dclk_rate;
+}
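+
+/*
+ * Worked example with assumed figures: a 594000 kHz HDMI mode with
+ * YUV420 output halves dclk_rate to 297000 kHz (K = 2), leaving
+ * dclk_core_rate = 148500 kHz, so the fields are programmed as ilog2
+ * exponents: if_pixclk_div = 1, if_dclk_div = 2, dclk_core_div = 1.
+ */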
+
+/*
+ * MIPI port mux on rk3588:
+ * 0: Video Port2
+ * 1: Video Port3
+ * 3: Video Port1 (MIPI1 only)
+ */
+static u32 rk3588_get_mipi_port_mux(int vp_id)
+{
+ if (vp_id == 1)
+ return 3;
+ else if (vp_id == 3)
+ return 1;
+ else
+ return 0;
+}
+
+static u32 rk3588_get_hdmi_pol(u32 flags)
+{
+ u32 val;
+
+ val = (flags & DRM_MODE_FLAG_NHSYNC) ? BIT(HSYNC_POSITIVE) : 0;
+ val |= (flags & DRM_MODE_FLAG_NVSYNC) ? BIT(VSYNC_POSITIVE) : 0;
+
+ return val;
+}
+
+static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
+{
+ struct vop2 *vop2 = vp->vop2;
+ int dclk_core_div = 1, dclk_out_div = 1, if_pixclk_div = 1, if_dclk_div = 1;
+ unsigned long clock;
+ u32 die, dip, div, vp_clk_div, val;
+
+ clock = rk3588_calc_cru_cfg(vp, id, &dclk_core_div, &dclk_out_div,
+ &if_pixclk_div, &if_dclk_div);
+ if (!clock)
+ return 0;
+
+ vp_clk_div = FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_CORE_DIV, dclk_core_div);
+ vp_clk_div |= FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_OUT_DIV, dclk_out_div);
+
+ die = vop2_readl(vop2, RK3568_DSP_IF_EN);
+ dip = vop2_readl(vop2, RK3568_DSP_IF_POL);
+ div = vop2_readl(vop2, RK3568_DSP_IF_CTRL);
+
+ switch (id) {
+ case ROCKCHIP_VOP2_EP_HDMI0:
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_HDMI0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
+ val = rk3588_get_hdmi_pol(polflags);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 1, 1));
+ regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 6, 5));
+ break;
+ case ROCKCHIP_VOP2_EP_HDMI1:
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_HDMI1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
+ val = rk3588_get_hdmi_pol(polflags);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 4, 4));
+ regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 8, 7));
+ break;
+ case ROCKCHIP_VOP2_EP_EDP0:
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_EDP0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 0, 0));
+ break;
+ case ROCKCHIP_VOP2_EP_EDP1:
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_EDP1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 3, 3));
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI0:
+ div &= ~RK3588_DSP_IF_MIPI0_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_MIPI0_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX;
+ val = rk3588_get_mipi_port_mux(vp->id);
+ die |= RK3588_SYS_DSP_INFACE_EN_MIPI0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX, !!val);
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI1:
+ div &= ~RK3588_DSP_IF_MIPI1_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_MIPI1_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
+ val = rk3588_get_mipi_port_mux(vp->id);
+ die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, val);
+ break;
+ case ROCKCHIP_VOP2_EP_DP0:
+ die &= ~RK3588_SYS_DSP_INFACE_EN_DP0_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_DP0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP0_MUX, vp->id);
+ dip &= ~RK3588_DSP_IF_POL__DP0_PIN_POL;
+ dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_DP1:
+ die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id);
+ dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL;
+ dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags);
+ break;
+ default:
+ drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
+ return 0;
+ }
+
+ dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD;
+
+ vop2_vp_write(vp, RK3588_VP_CLK_CTRL, vp_clk_div);
+ vop2_writel(vop2, RK3568_DSP_IF_EN, die);
+ vop2_writel(vop2, RK3568_DSP_IF_CTRL, div);
+ vop2_writel(vop2, RK3568_DSP_IF_POL, dip);
+
+ return clock;
+}
+
+static unsigned long vop2_set_intf_mux(struct vop2_video_port *vp, int ep_id, u32 polflags)
+{
+ struct vop2 *vop2 = vp->vop2;
+
+ if (vop2->data->soc_id == 3566 || vop2->data->soc_id == 3568)
+ return rk3568_set_intf_mux(vp, ep_id, polflags);
+ else if (vop2->data->soc_id == 3588)
+ return rk3588_set_intf_mux(vp, ep_id, polflags);
+ else
+ return 0;
}
static int us_to_vertical_line(struct drm_display_mode *mode, int us)
@@ -1592,6 +1962,8 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
vop2->enable_count++;
+ vcstate->yuv_overlay = is_yuv_output(vcstate->bus_format);
+
vop2_crtc_enable_irq(vp, VP_INT_POST_BUF_EMPTY);
polflags = 0;
@@ -1605,11 +1977,19 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) {
struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
- rk3568_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags);
+ /*
+ * To drive a high resolution (4KP120, 8K), the VOP on rk3588/rk3576
+ * needs to process multiple (1/2/4/8) pixels per cycle, so the dclk
+ * fed by the system CRU may be 1/2 or 1/4 of mode->clock.
+ */
+ clock = vop2_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags);
}
+ if (!clock)
+ return;
+
if (vcstate->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
- !(vp_data->feature & VOP_FEATURE_OUTPUT_10BIT))
+ !(vp_data->feature & VOP2_VP_FEATURE_OUTPUT_10BIT))
out_mode = ROCKCHIP_OUT_MODE_P888;
else
out_mode = vcstate->output_mode;
@@ -1618,8 +1998,10 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
if (vop2_output_uv_swap(vcstate->bus_format, vcstate->output_mode))
dsp_ctrl |= RK3568_VP_DSP_CTRL__DSP_RB_SWAP;
+ if (vop2_output_rg_swap(vop2, vcstate->bus_format))
+ dsp_ctrl |= RK3568_VP_DSP_CTRL__DSP_RG_SWAP;
- if (is_yuv_output(vcstate->bus_format))
+ if (vcstate->yuv_overlay)
dsp_ctrl |= RK3568_VP_DSP_CTRL__POST_DSP_OUT_R2Y;
vop2_dither_setup(crtc, &dsp_ctrl);
@@ -1923,28 +2305,22 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
u32 layer_sel = 0;
u32 port_sel;
unsigned int nlayer, ofs;
- struct drm_display_mode *adjusted_mode;
- u16 hsync_len;
- u16 hdisplay;
- u32 bg_dly;
- u32 pre_scan_dly;
+ u32 ovl_ctrl;
int i;
struct vop2_video_port *vp0 = &vop2->vps[0];
struct vop2_video_port *vp1 = &vop2->vps[1];
struct vop2_video_port *vp2 = &vop2->vps[2];
+ struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
- adjusted_mode = &vp->crtc.state->adjusted_mode;
- hsync_len = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
- hdisplay = adjusted_mode->crtc_hdisplay;
-
- bg_dly = vp->data->pre_scan_max_dly[3];
- vop2_writel(vop2, RK3568_VP_BG_MIX_CTRL(vp->id),
- FIELD_PREP(RK3568_VP_BG_MIX_CTRL__BG_DLY, bg_dly));
+ ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL);
+ ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
+ if (vcstate->yuv_overlay)
+ ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id);
+ else
+ ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id);
- pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len;
- vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly);
+ vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl);
- vop2_writel(vop2, RK3568_OVL_CTRL, 0);
port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL);
port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT;
@@ -1985,6 +2361,14 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER1;
port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER1, vp->id);
break;
+ case ROCKCHIP_VOP2_CLUSTER2:
+ port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER2;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER2, vp->id);
+ break;
+ case ROCKCHIP_VOP2_CLUSTER3:
+ port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER3;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER3, vp->id);
+ break;
case ROCKCHIP_VOP2_ESMART0:
port_sel &= ~RK3568_OVL_PORT_SEL__ESMART0;
port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART0, vp->id);
@@ -1993,6 +2377,14 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
port_sel &= ~RK3568_OVL_PORT_SEL__ESMART1;
port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART1, vp->id);
break;
+ case ROCKCHIP_VOP2_ESMART2:
+ port_sel &= ~RK3588_OVL_PORT_SEL__ESMART2;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART2, vp->id);
+ break;
+ case ROCKCHIP_VOP2_ESMART3:
+ port_sel &= ~RK3588_OVL_PORT_SEL__ESMART3;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART3, vp->id);
+ break;
case ROCKCHIP_VOP2_SMART0:
port_sel &= ~RK3568_OVL_PORT_SEL__SMART0;
port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART0, vp->id);
@@ -2018,7 +2410,6 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel);
- vop2_writel(vop2, RK3568_OVL_CTRL, RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD);
}
static void vop2_setup_dly_for_windows(struct vop2 *vop2)
@@ -2730,8 +3121,29 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(vop2->lut_regs))
return PTR_ERR(vop2->lut_regs);
}
+ if (vop2_data->feature & VOP2_FEATURE_HAS_SYS_GRF) {
+ vop2->sys_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (IS_ERR(vop2->sys_grf))
+ return dev_err_probe(dev, PTR_ERR(vop2->sys_grf), "cannot get sys_grf");
+ }
- vop2->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (vop2_data->feature & VOP2_FEATURE_HAS_VOP_GRF) {
+ vop2->vop_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,vop-grf");
+ if (IS_ERR(vop2->vop_grf))
+ return dev_err_probe(dev, PTR_ERR(vop2->vop_grf), "cannot get vop_grf");
+ }
+
+ if (vop2_data->feature & VOP2_FEATURE_HAS_VO1_GRF) {
+ vop2->vo1_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,vo1-grf");
+ if (IS_ERR(vop2->vo1_grf))
+ return dev_err_probe(dev, PTR_ERR(vop2->vo1_grf), "cannot get vo1_grf");
+ }
+
+ if (vop2_data->feature & VOP2_FEATURE_HAS_SYS_PMU) {
+ vop2->sys_pmu = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,pmu");
+ if (IS_ERR(vop2->sys_pmu))
+ return dev_err_probe(dev, PTR_ERR(vop2->sys_pmu), "cannot get sys_pmu");
+ }
vop2->hclk = devm_clk_get(vop2->dev, "hclk");
if (IS_ERR(vop2->hclk)) {
@@ -2745,6 +3157,12 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(vop2->aclk);
}
+ vop2->pclk = devm_clk_get_optional(vop2->dev, "pclk_vop");
+ if (IS_ERR(vop2->pclk)) {
+ drm_err(vop2->drm, "failed to get pclk source\n");
+ return PTR_ERR(vop2->pclk);
+ }
+
vop2->irq = platform_get_irq(pdev, 0);
if (vop2->irq < 0) {
drm_err(vop2->drm, "cannot find irq for vop2\n");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index 56fd31e05238..615a16196aff 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -7,16 +7,22 @@
#ifndef _ROCKCHIP_DRM_VOP2_H
#define _ROCKCHIP_DRM_VOP2_H
-#include "rockchip_drm_vop.h"
-
#include <linux/regmap.h>
#include <drm/drm_modes.h>
+#include "rockchip_drm_vop.h"
-#define VOP_FEATURE_OUTPUT_10BIT BIT(0)
+#define VOP2_VP_FEATURE_OUTPUT_10BIT BIT(0)
+
+#define VOP2_FEATURE_HAS_SYS_GRF BIT(0)
+#define VOP2_FEATURE_HAS_VO0_GRF BIT(1)
+#define VOP2_FEATURE_HAS_VO1_GRF BIT(2)
+#define VOP2_FEATURE_HAS_VOP_GRF BIT(3)
+#define VOP2_FEATURE_HAS_SYS_PMU BIT(4)
#define WIN_FEATURE_AFBDC BIT(0)
#define WIN_FEATURE_CLUSTER BIT(1)
+#define HIWORD_UPDATE(v, h, l) ((GENMASK(h, l) << 16) | ((v) << (l)))
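+/*
+ * e.g. HIWORD_UPDATE(1, 4, 4) == (BIT(4) << 16) | (1 << 4) == 0x00100010;
+ * the upper halfword is the write-enable mask the GRF uses to decide
+ * which bits of the lower halfword to latch.
+ */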
/*
* the delay number of a window in different mode.
*/
@@ -39,6 +45,18 @@ enum vop2_scale_down_mode {
VOP2_SCALE_DOWN_AVG,
};
+/*
+ * vop2 internal power domain ids:
+ * all must be non-zero; 0 is treated as invalid.
+ */
+#define VOP2_PD_CLUSTER0 BIT(0)
+#define VOP2_PD_CLUSTER1 BIT(1)
+#define VOP2_PD_CLUSTER2 BIT(2)
+#define VOP2_PD_CLUSTER3 BIT(3)
+#define VOP2_PD_DSC_8K BIT(5)
+#define VOP2_PD_DSC_4K BIT(6)
+#define VOP2_PD_ESMART BIT(7)
+
enum vop2_win_regs {
VOP2_WIN_ENABLE,
VOP2_WIN_FORMAT,
@@ -139,6 +157,7 @@ struct vop2_video_port_data {
struct vop2_data {
u8 nr_vps;
+ u64 feature;
const struct vop2_win_data *win;
const struct vop2_video_port_data *vp;
struct vop_rect max_input;
@@ -166,19 +185,6 @@ struct vop2_data {
#define WB_YRGB_FIFO_FULL_INTR BIT(18)
#define WB_COMPLETE_INTR BIT(19)
-/*
- * display output interface supported by rockchip lcdc
- */
-#define ROCKCHIP_OUT_MODE_P888 0
-#define ROCKCHIP_OUT_MODE_BT1120 0
-#define ROCKCHIP_OUT_MODE_P666 1
-#define ROCKCHIP_OUT_MODE_P565 2
-#define ROCKCHIP_OUT_MODE_BT656 5
-#define ROCKCHIP_OUT_MODE_S888 8
-#define ROCKCHIP_OUT_MODE_S888_DUMMY 12
-#define ROCKCHIP_OUT_MODE_YUV420 14
-/* for use special outface */
-#define ROCKCHIP_OUT_MODE_AAAA 15
enum vop_csc_format {
CSC_BT601L,
@@ -206,6 +212,11 @@ enum dst_factor_mode {
};
#define RK3568_GRF_VO_CON1 0x0364
+
+#define RK3588_GRF_SOC_CON1 0x0304
+#define RK3588_GRF_VOP_CON2 0x08
+#define RK3588_GRF_VO1_CON0 0x00
+
/* System registers definition */
#define RK3568_REG_CFG_DONE 0x000
#define RK3568_VERSION_INFO 0x004
@@ -214,6 +225,7 @@ enum dst_factor_mode {
#define RK3568_DSP_IF_EN 0x028
#define RK3568_DSP_IF_CTRL 0x02c
#define RK3568_DSP_IF_POL 0x030
+#define RK3588_SYS_PD_CTRL 0x034
#define RK3568_WB_CTRL 0x40
#define RK3568_WB_XSCAL_FACTOR 0x44
#define RK3568_WB_YRGB_MST 0x48
@@ -234,9 +246,14 @@ enum dst_factor_mode {
#define RK3568_VP_INT_RAW_STATUS(vp) (0xAC + (vp) * 0x10)
/* Video Port registers definition */
+#define RK3568_VP0_CTRL_BASE 0x0C00
+#define RK3568_VP1_CTRL_BASE 0x0D00
+#define RK3568_VP2_CTRL_BASE 0x0E00
+#define RK3588_VP3_CTRL_BASE 0x0F00
#define RK3568_VP_DSP_CTRL 0x00
#define RK3568_VP_MIPI_CTRL 0x04
#define RK3568_VP_COLOR_BAR_CTRL 0x08
+#define RK3588_VP_CLK_CTRL 0x0C
#define RK3568_VP_3D_LUT_CTRL 0x10
#define RK3568_VP_3D_LUT_MST 0x20
#define RK3568_VP_DSP_BG 0x2C
@@ -278,6 +295,17 @@ enum dst_factor_mode {
#define RK3568_SMART_DLY_NUM 0x6F8
/* Cluster register definition, offset relative to window base */
+#define RK3568_CLUSTER0_CTRL_BASE 0x1000
+#define RK3568_CLUSTER1_CTRL_BASE 0x1200
+#define RK3588_CLUSTER2_CTRL_BASE 0x1400
+#define RK3588_CLUSTER3_CTRL_BASE 0x1600
+#define RK3568_ESMART0_CTRL_BASE 0x1800
+#define RK3568_ESMART1_CTRL_BASE 0x1A00
+#define RK3568_SMART0_CTRL_BASE 0x1C00
+#define RK3568_SMART1_CTRL_BASE 0x1E00
+#define RK3588_ESMART2_CTRL_BASE 0x1C00
+#define RK3588_ESMART3_CTRL_BASE 0x1E00
+
#define RK3568_CLUSTER_WIN_CTRL0 0x00
#define RK3568_CLUSTER_WIN_CTRL1 0x04
#define RK3568_CLUSTER_WIN_YRGB_MST 0x10
@@ -371,13 +399,18 @@ enum dst_factor_mode {
#define RK3568_VP_DSP_CTRL__DITHER_DOWN_EN BIT(17)
#define RK3568_VP_DSP_CTRL__PRE_DITHER_DOWN_EN BIT(16)
#define RK3568_VP_DSP_CTRL__POST_DSP_OUT_R2Y BIT(15)
+#define RK3568_VP_DSP_CTRL__DSP_RG_SWAP BIT(10)
#define RK3568_VP_DSP_CTRL__DSP_RB_SWAP BIT(9)
+#define RK3568_VP_DSP_CTRL__DSP_BG_SWAP BIT(8)
#define RK3568_VP_DSP_CTRL__DSP_INTERLACE BIT(7)
#define RK3568_VP_DSP_CTRL__DSP_FILED_POL BIT(6)
#define RK3568_VP_DSP_CTRL__P2I_EN BIT(5)
#define RK3568_VP_DSP_CTRL__CORE_DCLK_DIV BIT(4)
#define RK3568_VP_DSP_CTRL__OUT_MODE GENMASK(3, 0)
+#define RK3588_VP_CLK_CTRL__DCLK_OUT_DIV GENMASK(3, 2)
+#define RK3588_VP_CLK_CTRL__DCLK_CORE_DIV GENMASK(1, 0)
+
#define RK3568_VP_POST_SCL_CTRL__VSCALEDOWN BIT(1)
#define RK3568_VP_POST_SCL_CTRL__HSCALEDOWN BIT(0)
@@ -396,11 +429,37 @@ enum dst_factor_mode {
#define RK3568_SYS_DSP_INFACE_EN_HDMI BIT(1)
#define RK3568_SYS_DSP_INFACE_EN_RGB BIT(0)
+#define RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX GENMASK(22, 21)
+#define RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX GENMASK(20, 20)
+#define RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX GENMASK(19, 18)
+#define RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX GENMASK(17, 16)
+#define RK3588_SYS_DSP_INFACE_EN_DP1_MUX GENMASK(15, 14)
+#define RK3588_SYS_DSP_INFACE_EN_DP0_MUX GENMASK(13, 12)
+#define RK3588_SYS_DSP_INFACE_EN_DPI GENMASK(9, 8)
+#define RK3588_SYS_DSP_INFACE_EN_MIPI1 BIT(7)
+#define RK3588_SYS_DSP_INFACE_EN_MIPI0 BIT(6)
+#define RK3588_SYS_DSP_INFACE_EN_HDMI1 BIT(5)
+#define RK3588_SYS_DSP_INFACE_EN_EDP1 BIT(4)
+#define RK3588_SYS_DSP_INFACE_EN_HDMI0 BIT(3)
+#define RK3588_SYS_DSP_INFACE_EN_EDP0 BIT(2)
+#define RK3588_SYS_DSP_INFACE_EN_DP1 BIT(1)
+#define RK3588_SYS_DSP_INFACE_EN_DP0 BIT(0)
+
+#define RK3588_DSP_IF_MIPI1_PCLK_DIV GENMASK(27, 26)
+#define RK3588_DSP_IF_MIPI0_PCLK_DIV GENMASK(25, 24)
+#define RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV GENMASK(22, 22)
+#define RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV GENMASK(21, 20)
+#define RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV GENMASK(18, 18)
+#define RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV GENMASK(17, 16)
+
#define RK3568_DSP_IF_POL__MIPI_PIN_POL GENMASK(19, 16)
#define RK3568_DSP_IF_POL__EDP_PIN_POL GENMASK(15, 12)
#define RK3568_DSP_IF_POL__HDMI_PIN_POL GENMASK(7, 4)
#define RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL GENMASK(3, 0)
+#define RK3588_DSP_IF_POL__DP1_PIN_POL GENMASK(14, 12)
+#define RK3588_DSP_IF_POL__DP0_PIN_POL GENMASK(10, 8)
+
#define RK3568_VP0_MIPI_CTRL__DCLK_DIV2_PHASE_LOCK BIT(5)
#define RK3568_VP0_MIPI_CTRL__DCLK_DIV2 BIT(4)
@@ -415,14 +474,19 @@ enum dst_factor_mode {
#define VOP2_COLOR_KEY_MASK BIT(31)
#define RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD BIT(28)
+#define RK3568_OVL_CTRL__YUV_MODE(vp) BIT(vp)
#define RK3568_VP_BG_MIX_CTRL__BG_DLY GENMASK(31, 24)
#define RK3568_OVL_PORT_SEL__SEL_PORT GENMASK(31, 16)
#define RK3568_OVL_PORT_SEL__SMART1 GENMASK(31, 30)
#define RK3568_OVL_PORT_SEL__SMART0 GENMASK(29, 28)
+#define RK3588_OVL_PORT_SEL__ESMART3 GENMASK(31, 30)
+#define RK3588_OVL_PORT_SEL__ESMART2 GENMASK(29, 28)
#define RK3568_OVL_PORT_SEL__ESMART1 GENMASK(27, 26)
#define RK3568_OVL_PORT_SEL__ESMART0 GENMASK(25, 24)
+#define RK3588_OVL_PORT_SEL__CLUSTER3 GENMASK(23, 22)
+#define RK3588_OVL_PORT_SEL__CLUSTER2 GENMASK(21, 20)
#define RK3568_OVL_PORT_SEL__CLUSTER1 GENMASK(19, 18)
#define RK3568_OVL_PORT_SEL__CLUSTER0 GENMASK(17, 16)
#define RK3568_OVL_PORT_SET__PORT2_MUX GENMASK(11, 8)
@@ -435,6 +499,10 @@ enum dst_factor_mode {
#define RK3568_CLUSTER_DLY_NUM__CLUSTER0_1 GENMASK(15, 8)
#define RK3568_CLUSTER_DLY_NUM__CLUSTER0_0 GENMASK(7, 0)
+#define RK3568_CLUSTER_WIN_CTRL0__WIN0_EN BIT(0)
+
+#define RK3568_SMART_REGION0_CTRL__WIN0_EN BIT(0)
+
#define RK3568_SMART_DLY_NUM__SMART1 GENMASK(31, 24)
#define RK3568_SMART_DLY_NUM__SMART0 GENMASK(23, 16)
#define RK3568_SMART_DLY_NUM__ESMART1 GENMASK(15, 8)
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index f0f47e9abf5a..59341654ec32 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -27,7 +27,6 @@
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
#include "rockchip_lvds.h"
#define DISPLAY_OUTPUT_RGB 0
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index c677b71ae516..dbfbde24698e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -19,7 +19,6 @@
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
#include "rockchip_rgb.h"
struct rockchip_rgb {
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 2c45d81983a5..48170694ac6b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -34,6 +34,30 @@ static const uint32_t formats_cluster[] = {
DRM_FORMAT_Y210, /* yuv422_10bit non-Linear mode only */
};
+static const uint32_t formats_esmart[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_NV12, /* yuv420_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV21, /* yvu420_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV16, /* yuv422_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV61, /* yvu422_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV20, /* yuv422_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_NV24, /* yuv444_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV42, /* yvu444_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_NV15, /* yuv420_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_YVYU, /* yuv422_8bit[YVYU] linear mode */
+ DRM_FORMAT_VYUY, /* yuv422_8bit[VYUY] linear mode */
+ DRM_FORMAT_YUYV, /* yuv422_8bit[YUYV] linear mode */
+ DRM_FORMAT_UYVY, /* yuv422_8bit[UYVY] linear mode */
+};
+
static const uint32_t formats_rk356x_esmart[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
@@ -112,7 +136,7 @@ static const uint64_t format_modifiers_afbc[] = {
static const struct vop2_video_port_data rk3568_vop_video_ports[] = {
{
.id = 0,
- .feature = VOP_FEATURE_OUTPUT_10BIT,
+ .feature = VOP2_VP_FEATURE_OUTPUT_10BIT,
.gamma_lut_len = 1024,
.cubic_lut_len = 9 * 9 * 9,
.max_output = { 4096, 2304 },
@@ -236,7 +260,188 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
},
};
+static const struct vop2_video_port_data rk3588_vop_video_ports[] = {
+ {
+ .id = 0,
+ .feature = VOP2_VP_FEATURE_OUTPUT_10BIT,
+ .gamma_lut_len = 1024,
+ .cubic_lut_len = 9 * 9 * 9, /* 9x9x9 */
+ .max_output = { 4096, 2304 },
+ /* hdr2sdr sdr2hdr hdr2hdr sdr2sdr */
+ .pre_scan_max_dly = { 76, 65, 65, 54 },
+ .offset = 0xc00,
+ }, {
+ .id = 1,
+ .feature = VOP2_VP_FEATURE_OUTPUT_10BIT,
+ .gamma_lut_len = 1024,
+ .cubic_lut_len = 9 * 9 * 9, /* 9x9x9 */
+ .max_output = { 4096, 2304 },
+ .pre_scan_max_dly = { 76, 65, 65, 54 },
+ .offset = 0xd00,
+ }, {
+ .id = 2,
+ .feature = VOP2_VP_FEATURE_OUTPUT_10BIT,
+ .gamma_lut_len = 1024,
+ .cubic_lut_len = 17 * 17 * 17, /* 17x17x17 */
+ .max_output = { 4096, 2304 },
+ .pre_scan_max_dly = { 52, 52, 52, 52 },
+ .offset = 0xe00,
+ }, {
+ .id = 3,
+ .gamma_lut_len = 1024,
+ .max_output = { 2048, 1536 },
+ .pre_scan_max_dly = { 52, 52, 52, 52 },
+ .offset = 0xf00,
+ },
+};
+
+/*
+ * rk3588 vop with 4 cluster windows and 4 esmart windows.
+ * Each cluster can act as one 4K window or be split into two windows.
+ * All windows in a cluster support AFBCD.
+ *
+ * Every esmart window and smart window supports 4 multi-regions.
+ *
+ * Scale filter modes:
+ *
+ * * Cluster: bicubic for horizontal scale up, bilinear otherwise
+ * * ESmart:
+ *   * nearest-neighbor/bilinear/bicubic for scale up
+ *   * nearest-neighbor/bilinear/average for scale down
+ *
+ * AXI read ID assignment:
+ * There are two AXI buses:
+ * AXI0 is a read/write bus with higher performance.
+ * AXI1 is a read-only bus.
+ *
+ * Every window on an AXI bus must be assigned two unique
+ * read IDs (yrgb_id/uv_id; valid IDs are 0x1~0xe).
+ *
+ * AXI0:
+ * Cluster0/1, Esmart0/1, WriteBack
+ *
+ * AXI1:
+ * Cluster2/3, Esmart2/3
+ */
+static const struct vop2_win_data rk3588_vop_win_data[] = {
+ {
+ .name = "Cluster0-win0",
+ .phys_id = ROCKCHIP_VOP2_CLUSTER0,
+ .base = 0x1000,
+ .formats = formats_cluster,
+ .nformats = ARRAY_SIZE(formats_cluster),
+ .format_modifiers = format_modifiers_afbc,
+ .layer_sel_id = 0,
+ .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .max_upscale_factor = 4,
+ .max_downscale_factor = 4,
+ .dly = { 4, 26, 29 },
+ .type = DRM_PLANE_TYPE_PRIMARY,
+ .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER,
+ }, {
+ .name = "Cluster1-win0",
+ .phys_id = ROCKCHIP_VOP2_CLUSTER1,
+ .base = 0x1200,
+ .formats = formats_cluster,
+ .nformats = ARRAY_SIZE(formats_cluster),
+ .format_modifiers = format_modifiers_afbc,
+ .layer_sel_id = 1,
+ .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_PRIMARY,
+ .max_upscale_factor = 4,
+ .max_downscale_factor = 4,
+ .dly = { 4, 26, 29 },
+ .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER,
+ }, {
+ .name = "Cluster2-win0",
+ .phys_id = ROCKCHIP_VOP2_CLUSTER2,
+ .base = 0x1400,
+ .formats = formats_cluster,
+ .nformats = ARRAY_SIZE(formats_cluster),
+ .format_modifiers = format_modifiers_afbc,
+ .layer_sel_id = 4,
+ .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_PRIMARY,
+ .max_upscale_factor = 4,
+ .max_downscale_factor = 4,
+ .dly = { 4, 26, 29 },
+ .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER,
+ }, {
+ .name = "Cluster3-win0",
+ .phys_id = ROCKCHIP_VOP2_CLUSTER3,
+ .base = 0x1600,
+ .formats = formats_cluster,
+ .nformats = ARRAY_SIZE(formats_cluster),
+ .format_modifiers = format_modifiers_afbc,
+ .layer_sel_id = 5,
+ .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_PRIMARY,
+ .max_upscale_factor = 4,
+ .max_downscale_factor = 4,
+ .dly = { 4, 26, 29 },
+ .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER,
+ }, {
+ .name = "Esmart0-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART0,
+ .formats = formats_esmart,
+ .nformats = ARRAY_SIZE(formats_esmart),
+ .format_modifiers = format_modifiers,
+ .base = 0x1800,
+ .layer_sel_id = 2,
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ .dly = { 23, 45, 48 },
+ }, {
+ .name = "Esmart1-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART1,
+ .formats = formats_esmart,
+ .nformats = ARRAY_SIZE(formats_esmart),
+ .format_modifiers = format_modifiers,
+ .base = 0x1a00,
+ .layer_sel_id = 3,
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ .dly = { 23, 45, 48 },
+ }, {
+ .name = "Esmart2-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART2,
+ .base = 0x1c00,
+ .formats = formats_esmart,
+ .nformats = ARRAY_SIZE(formats_esmart),
+ .format_modifiers = format_modifiers,
+ .layer_sel_id = 6,
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ .dly = { 23, 45, 48 },
+ }, {
+ .name = "Esmart3-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART3,
+ .formats = formats_esmart,
+ .nformats = ARRAY_SIZE(formats_esmart),
+ .format_modifiers = format_modifiers,
+ .base = 0x1e00,
+ .layer_sel_id = 7,
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ .dly = { 23, 45, 48 },
+ },
+};
+
static const struct vop2_data rk3566_vop = {
+ .feature = VOP2_FEATURE_HAS_SYS_GRF,
.nr_vps = 3,
.max_input = { 4096, 2304 },
.max_output = { 4096, 2304 },
@@ -247,6 +452,7 @@ static const struct vop2_data rk3566_vop = {
};
static const struct vop2_data rk3568_vop = {
+ .feature = VOP2_FEATURE_HAS_SYS_GRF,
.nr_vps = 3,
.max_input = { 4096, 2304 },
.max_output = { 4096, 2304 },
@@ -256,6 +462,18 @@ static const struct vop2_data rk3568_vop = {
.soc_id = 3568,
};
+static const struct vop2_data rk3588_vop = {
+ .feature = VOP2_FEATURE_HAS_SYS_GRF | VOP2_FEATURE_HAS_VO1_GRF |
+ VOP2_FEATURE_HAS_VOP_GRF | VOP2_FEATURE_HAS_SYS_PMU,
+ .nr_vps = 4,
+ .max_input = { 4096, 4320 },
+ .max_output = { 4096, 4320 },
+ .vp = rk3588_vop_video_ports,
+ .win = rk3588_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3588_vop_win_data),
+ .soc_id = 3588,
+};
+
static const struct of_device_id vop2_dt_match[] = {
{
.compatible = "rockchip,rk3566-vop",
@@ -264,6 +482,9 @@ static const struct of_device_id vop2_dt_match[] = {
.compatible = "rockchip,rk3568-vop",
.data = &rk3568_vop,
}, {
+ .compatible = "rockchip,rk3588-vop",
+ .data = &rk3588_vop,
+ }, {
},
};
MODULE_DEVICE_TABLE(of, vop2_dt_match);
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index bef293922b98..3d0e093a7e6e 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -319,7 +319,7 @@ static int ssd130x_pwm_enable(struct ssd130x_device *ssd130x)
pwm_init_state(ssd130x->pwm, &pwmstate);
pwm_set_relative_duty_cycle(&pwmstate, 50, 100);
- pwm_apply_state(ssd130x->pwm, &pwmstate);
+ pwm_apply_might_sleep(ssd130x->pwm, &pwmstate);
/* Enable the PWM */
pwm_enable(ssd130x->pwm);
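
For context (editor's note): pwm_apply_state() was renamed to pwm_apply_might_sleep() to make explicit that the call may sleep; the arguments are unchanged, so the conversion is mechanical:

	/* Before */
	pwm_apply_state(pwm, &state);
	/* After: same arguments, caller must be in sleepable context */
	pwm_apply_might_sleep(pwm, &state);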
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
index d1a65a921f5a..f5f62eb0eeca 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
@@ -302,7 +302,6 @@ int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi)
return -ENOMEM;
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_DDC;
adap->algo = &sun4i_hdmi_i2c_algorithm;
strscpy(adap->name, "sun4i_hdmi_i2c adapter", sizeof(adap->name));
i2c_set_adapdata(adap, hdmi);
diff --git a/drivers/gpu/drm/tests/drm_exec_test.c b/drivers/gpu/drm/tests/drm_exec_test.c
index 563949d777dd..81f928a429ba 100644
--- a/drivers/gpu/drm/tests/drm_exec_test.c
+++ b/drivers/gpu/drm/tests/drm_exec_test.c
@@ -46,7 +46,7 @@ static void sanitycheck(struct kunit *test)
{
struct drm_exec exec;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_fini(&exec);
KUNIT_SUCCEED(test);
}
@@ -60,7 +60,7 @@ static void test_lock(struct kunit *test)
drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
ret = drm_exec_lock_obj(&exec, &gobj);
drm_exec_retry_on_contention(&exec);
@@ -80,7 +80,7 @@ static void test_lock_unlock(struct kunit *test)
drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
ret = drm_exec_lock_obj(&exec, &gobj);
drm_exec_retry_on_contention(&exec);
@@ -107,7 +107,7 @@ static void test_duplicates(struct kunit *test)
drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
- drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
ret = drm_exec_lock_obj(&exec, &gobj);
drm_exec_retry_on_contention(&exec);
@@ -134,7 +134,7 @@ static void test_prepare(struct kunit *test)
drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
ret = drm_exec_prepare_obj(&exec, &gobj, 1);
drm_exec_retry_on_contention(&exec);
@@ -159,7 +159,7 @@ static void test_prepare_array(struct kunit *test)
drm_gem_private_object_init(priv->drm, &gobj1, PAGE_SIZE);
drm_gem_private_object_init(priv->drm, &gobj2, PAGE_SIZE);
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec)
ret = drm_exec_prepare_array(&exec, array, ARRAY_SIZE(array),
1);
@@ -174,14 +174,14 @@ static void test_multiple_loops(struct kunit *test)
{
struct drm_exec exec;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec)
{
break;
}
drm_exec_fini(&exec);
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec)
{
break;
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index bccb33b900f3..ca4f8e4c5d5d 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -5,6 +5,7 @@
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_managed.h>
+#include <kunit/device.h>
#include <kunit/resource.h>
#include <linux/device.h>
@@ -15,40 +16,6 @@
static const struct drm_mode_config_funcs drm_mode_config_funcs = {
};
-static int fake_probe(struct platform_device *pdev)
-{
- return 0;
-}
-
-static struct platform_driver fake_platform_driver = {
- .probe = fake_probe,
- .driver = {
- .name = KUNIT_DEVICE_NAME,
- },
-};
-
-static void kunit_action_platform_driver_unregister(void *ptr)
-{
- struct platform_driver *drv = ptr;
-
- platform_driver_unregister(drv);
-
-}
-
-static void kunit_action_platform_device_put(void *ptr)
-{
- struct platform_device *pdev = ptr;
-
- platform_device_put(pdev);
-}
-
-static void kunit_action_platform_device_del(void *ptr)
-{
- struct platform_device *pdev = ptr;
-
- platform_device_del(pdev);
-}
-
/**
* drm_kunit_helper_alloc_device - Allocate a mock device for a KUnit test
* @test: The test context object
@@ -66,34 +33,7 @@ static void kunit_action_platform_device_del(void *ptr)
*/
struct device *drm_kunit_helper_alloc_device(struct kunit *test)
{
- struct platform_device *pdev;
- int ret;
-
- ret = platform_driver_register(&fake_platform_driver);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- ret = kunit_add_action_or_reset(test,
- kunit_action_platform_driver_unregister,
- &fake_platform_driver);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- pdev = platform_device_alloc(KUNIT_DEVICE_NAME, PLATFORM_DEVID_NONE);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
-
- ret = kunit_add_action_or_reset(test,
- kunit_action_platform_device_put,
- pdev);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- ret = platform_device_add(pdev);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- ret = kunit_add_action_or_reset(test,
- kunit_action_platform_device_del,
- pdev);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- return &pdev->dev;
+ return kunit_device_register(test, KUNIT_DEVICE_NAME);
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_alloc_device);
@@ -106,19 +46,7 @@ EXPORT_SYMBOL_GPL(drm_kunit_helper_alloc_device);
*/
void drm_kunit_helper_free_device(struct kunit *test, struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
-
- kunit_release_action(test,
- kunit_action_platform_device_del,
- pdev);
-
- kunit_release_action(test,
- kunit_action_platform_device_put,
- pdev);
-
- kunit_release_action(test,
- kunit_action_platform_driver_unregister,
- &fake_platform_driver);
+ kunit_device_unregister(test, dev);
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_free_device);
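
With the helpers reduced to kunit_device_register()/kunit_device_unregister(), a caller-side sketch looks like this (test body hypothetical; the helpers' signatures are as shown in the hunks above):

	static void example_test(struct kunit *test)
	{
		struct device *dev;

		dev = drm_kunit_helper_alloc_device(test);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

		/* ... exercise the mock device ... */

		drm_kunit_helper_free_device(test, dev);
	}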
diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
index b1b423b68cdf..19eaff22e6ae 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_device_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
@@ -175,7 +175,7 @@ static void ttm_device_init_pools(struct kunit *test)
if (params->pools_init_expected) {
for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
- for (int j = 0; j <= MAX_ORDER; ++j) {
+ for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
pt = pool->caching[i].orders[j];
KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
KUNIT_EXPECT_EQ(test, pt.caching, i);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
index 2d9cae8cd984..cceaa18d4e46 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -109,7 +109,7 @@ static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
},
{
.description = "Above the allocation limit",
- .order = MAX_ORDER + 1,
+ .order = MAX_PAGE_ORDER + 1,
},
{
.description = "One page, with coherent DMA mappings enabled",
@@ -118,7 +118,7 @@ static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
},
{
.description = "Above the allocation limit, with coherent DMA mappings enabled",
- .order = MAX_ORDER + 1,
+ .order = MAX_PAGE_ORDER + 1,
.use_dma_alloc = true,
},
};
@@ -165,7 +165,7 @@ static void ttm_pool_alloc_basic(struct kunit *test)
fst_page = tt->pages[0];
last_page = tt->pages[tt->num_pages - 1];
- if (params->order <= MAX_ORDER) {
+ if (params->order <= MAX_PAGE_ORDER) {
if (params->use_dma_alloc) {
KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
@@ -182,7 +182,7 @@ static void ttm_pool_alloc_basic(struct kunit *test)
* order 0 blocks
*/
KUNIT_ASSERT_EQ(test, fst_page->private,
- min_t(unsigned int, MAX_ORDER,
+ min_t(unsigned int, MAX_PAGE_ORDER,
params->order));
KUNIT_ASSERT_EQ(test, last_page->private, 0);
}
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index fe610a3cace0..b62f420a9f96 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -65,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);
static atomic_long_t allocated_pages;
-static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
-static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
@@ -447,7 +447,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
else
gfp_flags |= GFP_HIGHUSER;
- for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
+ for (order = min_t(unsigned int, MAX_PAGE_ORDER, __fls(num_pages));
num_pages;
order = min_t(unsigned int, order, __fls(num_pages))) {
struct ttm_pool_type *pt;
@@ -568,7 +568,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
if (use_dma_alloc || nid != NUMA_NO_NODE) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j <= MAX_ORDER; ++j)
+ for (j = 0; j < NR_PAGE_ORDERS; ++j)
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
}
@@ -601,7 +601,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j <= MAX_ORDER; ++j)
+ for (j = 0; j < NR_PAGE_ORDERS; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
@@ -656,7 +656,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
unsigned int i;
seq_puts(m, "\t ");
- for (i = 0; i <= MAX_ORDER; ++i)
+ for (i = 0; i < NR_PAGE_ORDERS; ++i)
seq_printf(m, " ---%2u---", i);
seq_puts(m, "\n");
}
@@ -667,7 +667,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
{
unsigned int i;
- for (i = 0; i <= MAX_ORDER; ++i)
+ for (i = 0; i < NR_PAGE_ORDERS; ++i)
seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
seq_puts(m, "\n");
}
@@ -776,7 +776,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
spin_lock_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
- for (i = 0; i <= MAX_ORDER; ++i) {
+ for (i = 0; i < NR_PAGE_ORDERS; ++i) {
ttm_pool_type_init(&global_write_combined[i], NULL,
ttm_write_combined, i);
ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -816,7 +816,7 @@ void ttm_pool_mgr_fini(void)
{
unsigned int i;
- for (i = 0; i <= MAX_ORDER; ++i) {
+ for (i = 0; i < NR_PAGE_ORDERS; ++i) {
ttm_pool_type_fini(&global_write_combined[i]);
ttm_pool_type_fini(&global_uncached[i]);
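
The conversions above follow the mm rename in which MAX_PAGE_ORDER is the highest valid allocation order and NR_PAGE_ORDERS (== MAX_PAGE_ORDER + 1) is the number of orders, so inclusive loop bounds become exclusive ones with the same iteration count:

	/* Before: orders 0..MAX_ORDER, inclusive upper bound. */
	for (i = 0; i <= MAX_ORDER; ++i)
		ttm_pool_type_fini(&global_uncached[i]);

	/* After: exclusive upper bound, identical range. */
	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		ttm_pool_type_fini(&global_uncached[i]);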
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index f843a50d5dce..94eafcecc65b 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -62,9 +62,9 @@ static const struct v3d_reg_def v3d_core_reg_defs[] = {
REGDEF(33, 71, V3D_PTB_BPCA),
REGDEF(33, 71, V3D_PTB_BPCS),
- REGDEF(33, 41, V3D_GMP_STATUS(33)),
- REGDEF(33, 41, V3D_GMP_CFG(33)),
- REGDEF(33, 41, V3D_GMP_VIO_ADDR(33)),
+ REGDEF(33, 42, V3D_GMP_STATUS(33)),
+ REGDEF(33, 42, V3D_GMP_CFG(33)),
+ REGDEF(33, 42, V3D_GMP_VIO_ADDR(33)),
REGDEF(33, 71, V3D_ERR_FDBGO),
REGDEF(33, 71, V3D_ERR_FDBGB),
@@ -74,13 +74,13 @@ static const struct v3d_reg_def v3d_core_reg_defs[] = {
static const struct v3d_reg_def v3d_csd_reg_defs[] = {
REGDEF(41, 71, V3D_CSD_STATUS),
- REGDEF(41, 41, V3D_CSD_CURRENT_CFG0(41)),
- REGDEF(41, 41, V3D_CSD_CURRENT_CFG1(41)),
- REGDEF(41, 41, V3D_CSD_CURRENT_CFG2(41)),
- REGDEF(41, 41, V3D_CSD_CURRENT_CFG3(41)),
- REGDEF(41, 41, V3D_CSD_CURRENT_CFG4(41)),
- REGDEF(41, 41, V3D_CSD_CURRENT_CFG5(41)),
- REGDEF(41, 41, V3D_CSD_CURRENT_CFG6(41)),
+ REGDEF(41, 42, V3D_CSD_CURRENT_CFG0(41)),
+ REGDEF(41, 42, V3D_CSD_CURRENT_CFG1(41)),
+ REGDEF(41, 42, V3D_CSD_CURRENT_CFG2(41)),
+ REGDEF(41, 42, V3D_CSD_CURRENT_CFG3(41)),
+ REGDEF(41, 42, V3D_CSD_CURRENT_CFG4(41)),
+ REGDEF(41, 42, V3D_CSD_CURRENT_CFG5(41)),
+ REGDEF(41, 42, V3D_CSD_CURRENT_CFG6(41)),
REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)),
REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)),
REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)),
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock.c b/drivers/gpu/drm/vc4/tests/vc4_mock.c
index 63ca46f4cb35..becb3dbaa548 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_mock.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock.c
@@ -153,12 +153,9 @@ static int __build_mock(struct kunit *test, struct drm_device *drm,
return 0;
}
-static void kunit_action_drm_dev_unregister(void *ptr)
-{
- struct drm_device *drm = ptr;
-
- drm_dev_unregister(drm);
-}
+KUNIT_DEFINE_ACTION_WRAPPER(kunit_action_drm_dev_unregister,
+ drm_dev_unregister,
+ struct drm_device *);
static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
{
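
KUNIT_DEFINE_ACTION_WRAPPER() generates the same wrapper that was previously written by hand; roughly (a sketch of the expansion, not the verbatim macro body):

	static void kunit_action_drm_dev_unregister(void *in)
	{
		struct drm_device *arg = in;

		drm_dev_unregister(arg);
	}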
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 25c9c71256d3..f05e2c95a60d 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -672,11 +672,21 @@ vc4_hdmi_connector_duplicate_state(struct drm_connector *connector)
return &new_state->base;
}
+static void vc4_hdmi_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct vc4_hdmi_connector_state *vc4_state =
+ conn_state_to_vc4_hdmi_conn_state(state);
+
+ __drm_atomic_helper_connector_destroy_state(state);
+ kfree(vc4_state);
+}
+
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = vc4_hdmi_connector_reset,
.atomic_duplicate_state = vc4_hdmi_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_destroy_state = vc4_hdmi_connector_destroy_state,
.atomic_get_property = vc4_hdmi_connector_get_property,
.atomic_set_property = vc4_hdmi_connector_set_property,
};
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index d7e63aa14663..bc724cbd5e3a 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -30,17 +30,25 @@ static const struct drm_connector_funcs vkms_wb_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static int vkms_wb_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state;
struct drm_framebuffer *fb;
- const struct drm_display_mode *mode = &crtc_state->mode;
+ const struct drm_display_mode *mode;
int ret;
if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
return 0;
+ if (!conn_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->mode;
+
fb = conn_state->writeback_job->fb;
if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
@@ -48,17 +56,13 @@ static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
return -EINVAL;
}
- ret = drm_atomic_helper_check_wb_encoder_state(encoder, conn_state);
+ ret = drm_atomic_helper_check_wb_connector_state(connector, state);
if (ret < 0)
return ret;
return 0;
}
-static const struct drm_encoder_helper_funcs vkms_wb_encoder_helper_funcs = {
- .atomic_check = vkms_wb_encoder_atomic_check,
-};
-
static int vkms_wb_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -161,6 +165,7 @@ static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = {
.prepare_writeback_job = vkms_wb_prepare_job,
.cleanup_writeback_job = vkms_wb_cleanup_job,
.atomic_commit = vkms_wb_atomic_commit,
+ .atomic_check = vkms_wb_atomic_check,
};
int vkms_enable_writeback_connector(struct vkms_device *vkmsdev)
@@ -171,7 +176,7 @@ int vkms_enable_writeback_connector(struct vkms_device *vkmsdev)
return drm_writeback_connector_init(&vkmsdev->drm, wb,
&vkms_wb_connector_funcs,
- &vkms_wb_encoder_helper_funcs,
+ NULL,
vkms_wb_formats,
ARRAY_SIZE(vkms_wb_formats),
1);
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 1b0ef91a5d2c..e36ae1f0d885 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_XE
tristate "Intel Xe Graphics"
- depends on DRM && PCI && MMU && (m || (y && KUNIT=y))
+ depends on DRM && PCI && MMU && (m || (y && KUNIT=y)) && 64BIT
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@@ -19,13 +19,13 @@ config DRM_XE
select DRM_MIPI_DSI
select RELAY
select IRQ_WORK
- # i915 depends on ACPI_VIDEO when ACPI is enabled
+ # xe depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if X86 && ACPI
select ACPI_BUTTON if ACPI
- select ACPI_WMI if ACPI
+ select ACPI_WMI if X86 && ACPI
select SYNC_FILE
select IOSF_MBI
select CRC32
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 9db1867878a4..59fd9bb40c18 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -230,7 +230,7 @@ retry:
vm_exec.vm = &vm->gpuvm;
vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
if (xe_vm_in_lr_mode(vm)) {
- drm_exec_init(exec, vm_exec.flags);
+ drm_exec_init(exec, vm_exec.flags, 0);
} else {
err = drm_gpuvm_exec_lock(&vm_exec);
if (err) {
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 5c2603075af9..7ce67c9d30a7 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -194,7 +194,7 @@ retry_userptr:
}
/* Lock VM and BOs dma-resv */
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
ret = xe_pf_begin(&exec, vma, atomic, tile->id);
drm_exec_retry_on_contention(&exec);
@@ -538,7 +538,7 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
goto unlock_vm;
/* Lock VM and BOs dma-resv */
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
ret = xe_pf_begin(&exec, vma, true, tile->id);
drm_exec_retry_on_contention(&exec);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index dc609e3cc094..d096a8c00bd4 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -549,7 +549,7 @@ retry:
goto out_unlock_outer;
}
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
bool done = false;
@@ -987,7 +987,7 @@ static void xe_vma_destroy_unlocked(struct xe_vma *vma)
struct drm_exec exec;
int err;
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
err = xe_vm_prepare_vma(&exec, vma, 0);
drm_exec_retry_on_contention(&exec);
@@ -2126,7 +2126,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
lockdep_assert_held_write(&vm->lock);
if (bo) {
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
err = 0;
if (!bo->vm) {
@@ -2496,7 +2496,7 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
int err;
retry_userptr:
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
err = op_execute(&exec, vm, vma, op);
drm_exec_retry_on_contention(&exec);