/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "vega20_ppt.h"
#include "navi10_ppt.h"

#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/nbio/nbio_7_4_offset.h"
#include "asic_reg/nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"

#define SMU11_VOLTAGE_SCALE 4
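
/*
 * Driver <-> SMU messaging goes through the MP1 C2PMSG mailbox registers:
 * the message index is written to C2PMSG_66, an optional 32-bit argument to
 * C2PMSG_82, and the firmware reports completion/status through C2PMSG_90,
 * which the driver clears before each request and then polls.  The helpers
 * below implement that sequence.
 */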
static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
					      uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);

	return 0;
}
static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);

	return 0;
}
static int smu_v11_0_wait_for_response(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t cur_value, i, timeout = adev->usec_timeout * 10;

	for (i = 0; i < timeout; i++) {
		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == timeout)
		return -ETIME;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	smu_v11_0_wait_for_response(smu);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);

	if (ret)
		pr_err("Failed to send message 0x%x, response 0x%x\n", index,
		       ret);

	return ret;
}
static int
smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
			      uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
		       index, ret, param);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
		       index, ret, param);

	return ret;
}
static int smu_v11_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const char *chip_name;
	char fw_name[30];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);

	err = reject_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
			  fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}
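
/*
 * Load the SMC firmware image directly when the PSP does not do it for us:
 * the ucode payload is copied word by word into MP1 SRAM through the
 * indirect PCIe register interface, MP1 is then taken out of reset, and we
 * poll the firmware flags until the interrupts-enabled bit signals that the
 * SMU has booted.
 */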
static int smu_v11_0_load_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;

	return 0;
}
static int smu_v11_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}
static int smu_v11_0_check_fw_version(struct smu_context *smu)
{
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
	int ret = 0;

	ret = smu_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But that's visible
	 * only on the paired driver.
	 * Considering the above, we just print a warning message instead
	 * of halting driver load.
	 */
	if (if_version != smu->smc_if_version) {
		pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw version = 0x%08x (%d.%d.%d)\n",
			smu->smc_if_version, if_version,
			smu_version, smu_major, smu_minor, smu_debug);
		pr_warn("SMU driver if version not matched\n");
	}

	return ret;
}
static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}
static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}
static int smu_v11_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	int ret, index;
	uint32_t size = 0;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	void *table;
	uint16_t version_major, version_minor;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
		switch (version_minor) {
		case 0:
			ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
			break;
		case 1:
		default:
			ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
							 smu->smu_table.boot_values.pp_table_id);
			break;
		}
		if (ret)
			return ret;

	} else {
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    powerplayinfo);

		ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
					      (uint8_t **)&table);
		if (ret)
			return ret;
		size = atom_table_size;
	}

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}
static int smu_v11_0_init_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
		return -EINVAL;

	return smu_alloc_dpm_context(smu);
}
static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
		return -EINVAL;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}
static int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = NULL;
	int ret = 0;

	if (smu_table->tables || smu_table->table_count == 0)
		return -EINVAL;

	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
			 GFP_KERNEL);
	if (!tables)
		return -ENOMEM;

	smu_table->tables = tables;

	ret = smu_tables_init(smu, tables);
	if (ret)
		return ret;

	ret = smu_v11_0_init_dpm_context(smu);
	if (ret)
		return ret;

	return 0;
}
static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	int ret = 0;

	if (!smu_table->tables || smu_table->table_count == 0)
		return -EINVAL;

	kfree(smu_table->tables);
	kfree(smu_table->metrics_table);
	smu_table->tables = NULL;
	smu_table->table_count = 0;
	smu_table->metrics_table = NULL;
	smu_table->metrics_time = 0;

	ret = smu_v11_0_fini_dpm_context(smu);
	if (ret)
		return ret;

	return 0;
}
static int smu_v11_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu->pm_enabled)
		return 0;
	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);

	return 0;
}
static int smu_v11_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu->pm_enabled)
		return 0;
	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}
int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
				      (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		pr_err("unknown atom_firmware_info version for smu11!\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
	default:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		break;
	}

	return 0;
}
static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
	int ret, index;
	struct amdgpu_device *adev = smu->adev;
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;

	input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_ECLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_VCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	return 0;
}
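
/*
 * Hand the reserved system-memory pool to the SMU.  Both the CPU virtual
 * address and the MC (GPU) address are split into high/low 32-bit halves and
 * passed through separate messages, followed by the pool size.
 */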
static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_high, address_low;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = (uintptr_t)memory_pool->cpu_addr;
	address_high = (uint32_t)upper_32_bits(address);
	address_low  = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrLow,
					  address_low);
	if (ret)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low  = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					  address_low);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					  (uint32_t)memory_pool->size);
	if (ret)
		return ret;

	return ret;
}
static int smu_v11_0_check_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_check_powerplay_table(smu);

	return ret;
}
static int smu_v11_0_parse_pptable(struct smu_context *smu)
{
	int ret;

	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];

	if (table_context->driver_pptable)
		return -EINVAL;

	table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);

	if (!table_context->driver_pptable)
		return -ENOMEM;

	ret = smu_store_powerplay_table(smu);
	if (ret)
		return -EINVAL;

	ret = smu_append_powerplay_table(smu);

	return ret;
}
static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_set_default_dpm_table(smu);

	return ret;
}
static int smu_v11_0_write_pptable(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
			       table_context->driver_pptable, true);

	return ret;
}
static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = NULL;

	table = &smu_table->tables[SMU_TABLE_WATERMARKS];
	if (!table)
		return -EINVAL;

	if (!table->cpu_addr)
		return -EINVAL;

	ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
			       true);

	return ret;
}
static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetMinDeepSleepDcefclk, clk);
	if (ret)
		pr_err("SMU11 attempt to set divider for DCEFCLK failed!");

	return ret;
}
static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;

	if (!smu->pm_enabled)
		return 0;
	if (!table_context)
		return -EINVAL;

	return smu_set_deep_sleep_dcefclk(smu,
					  table_context->boot_values.dcefclk / 100);
}
static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_send_smc_msg_with_param(smu,
						  SMU_MSG_SetToolsDramAddrHigh,
						  upper_32_bits(tool_table->mc_address));
		if (!ret)
			ret = smu_send_smc_msg_with_param(smu,
							  SMU_MSG_SetToolsDramAddrLow,
							  lower_32_bits(tool_table->mc_address));
	}

	return ret;
}
static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);

	return ret;
}
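
/*
 * SMU features are tracked as a 64-bit mask, but each message only carries
 * 32 bits, so the mask is split into "Low" (bits 0-31) and "High"
 * (bits 32-63) words for the enable/disable messages below.
 */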
static int smu_v11_0_update_feature_enable_state(struct smu_context *smu, uint32_t feature_id, bool enabled)
{
	uint32_t feature_low = 0, feature_high = 0;
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	if (feature_id < 32)
		feature_low = (1 << feature_id);
	else if (feature_id < 64)
		feature_high = (1 << (feature_id - 32));
	else
		return -EINVAL;

	if (enabled) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
						  feature_low);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
						  feature_high);
		if (ret)
			return ret;
	} else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
						  feature_low);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
						  feature_high);
		if (ret)
			return ret;
	}

	return ret;
}
static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	mutex_lock(&feature->mutex);
	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
		goto failed;

	bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					  feature_mask[1]);
	if (ret)
		goto failed;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
					  feature_mask[0]);
	if (ret)
		goto failed;

failed:
	mutex_unlock(&feature->mutex);
	return ret;
}
static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
				      uint32_t *feature_mask, uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &feature_mask_high);
	if (ret)
		return ret;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &feature_mask_low);
	if (ret)
		return ret;

	feature_mask[0] = feature_mask_low;
	feature_mask[1] = feature_mask_high;

	return ret;
}
static int smu_v11_0_system_features_control(struct smu_context *smu,
					     bool en)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	if (smu->pm_enabled) {
		ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					     SMU_MSG_DisableAllSmuFeatures));
		if (ret)
			return ret;
	}

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		return ret;

	bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
		    feature->feature_num);
	bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
		    feature->feature_num);

	return ret;
}
static int smu_v11_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);

	return ret;
}
static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					  smu_clk_get_index(smu, clock_select) << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);
	if (ret)
		return ret;

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					  smu_clk_get_index(smu, clock_select) << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);

	return ret;
}
static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
					 GFP_KERNEL);
	if (!max_sustainable_clocks)
		return -ENOMEM;
	smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			pr_err("[%s] failed to get max UCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			pr_err("[%s] failed to get max SOCCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			pr_err("[%s] failed to get max DCEFCLK from SMC!",
			       __func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			pr_err("[%s] failed to get max DISPCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			pr_err("[%s] failed to get max PHYCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			pr_err("[%s] failed to get max PIXCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}
static int smu_v11_0_get_power_limit(struct smu_context *smu,
				     uint32_t *limit,
				     bool get_default)
{
	int ret = 0;

	if (get_default) {
		mutex_lock(&smu->mutex);
		*limit = smu->default_power_limit;
		if (smu->od_enabled) {
			*limit *= (100 + smu->smu_table.TDPODLimit);
			*limit /= 100;
		}
		mutex_unlock(&smu->mutex);
	} else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
						  smu_power_get_index(smu, SMU_POWER_SOURCE_AC) << 16);
		if (ret) {
			pr_err("[%s] get PPT limit failed!", __func__);
			return ret;
		}
		smu_read_smc_arg(smu, limit);
		smu->power_limit = *limit;
	}

	return ret;
}
static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
	uint32_t max_power_limit;
	int ret = 0;

	if (n == 0)
		n = smu->default_power_limit;

	max_power_limit = smu->default_power_limit;

	if (smu->od_enabled) {
		max_power_limit *= (100 + smu->smu_table.TDPODLimit);
		max_power_limit /= 100;
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
	if (ret) {
		pr_err("[%s] Set power limit Failed!", __func__);
		return ret;
	}
	smu->power_limit = n;

	return 0;
}
static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
					  enum smu_clk_type clk_id,
					  uint32_t *value)
{
	int ret = 0;
	uint32_t freq = 0;

	if (clk_id >= SMU_CLK_COUNT || !value)
		return -EINVAL;

	/* if the GetDpmClockFreq message is not supported, read the current
	 * clock from the SmuMetrics_t table instead
	 */
	if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) == 0)
		ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
	else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
						  (smu_clk_get_index(smu, clk_id) << 16));
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, &freq);
		if (ret)
			return ret;
	}

	freq *= 100;
	*value = freq;

	return ret;
}
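
/*
 * Program the THM block with the software thermal trip points: the alert
 * range is clamped to the SMU_THERMAL_*_ALERT_TEMP limits and written, in
 * degrees Celsius, to the DIG_THERM_INTH/INTL fields, with the high-to-low
 * and low-to-high interrupts unmasked.
 */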
static int smu_v11_0_set_thermal_range(struct smu_context *smu,
				       struct smu_temperature_range *range)
{
	struct amdgpu_device *adev = smu->adev;
	int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
	int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
	uint32_t val;

	if (!range)
		return -EINVAL;

	if (low < range->min)
		low = range->min;
	if (high > range->max)
		high = range->max;

	low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min);
	high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max);

	if (low > high)
		return -EINVAL;

	val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
	val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

	return 0;
}
static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t val = 0;

	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

	return 0;
}
static int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
	int ret = 0;
	struct smu_temperature_range range = {
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX};
	struct amdgpu_device *adev = smu->adev;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_get_thermal_temperature_range(smu, &range);
	if (ret)
		return ret;

	if (smu->smu_table.thermal_controller_type) {
		ret = smu_v11_0_set_thermal_range(smu, &range);
		if (ret)
			return ret;

		ret = smu_v11_0_enable_thermal_alert(smu);
		if (ret)
			return ret;

		ret = smu_set_thermal_fan_table(smu);
		if (ret)
			return ret;
	}

	adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return ret;
}
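
/*
 * SVI0 telemetry reports the GFX voltage as an 8-bit VID code.  The
 * conversion below assumes the usual SVI2 encoding (1.55 V at VID 0,
 * stepping down 6.25 mV per code): 6200 - vid * 25 gives the voltage in
 * units of 0.25 mV, and dividing by SMU11_VOLTAGE_SCALE (4) yields mV.
 */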
static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}
static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;

	val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
		   SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		   SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;
}
static int smu_v11_0_read_sensor(struct smu_context *smu,
				 enum amd_pp_sensors sensor,
				 void *data, uint32_t *size)
{
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		ret = smu_common_read_sensor(smu, sensor, data, size);
		break;
	}

	/* try to get the sensor data from the ASIC-specific backend */
	if (ret)
		ret = smu_asic_read_sensor(smu, sensor, data, size);

	if (ret)
		*size = 0;

	return ret;
}
static int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (!smu->pm_enabled)
		return -EINVAL;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			pr_info("[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		mutex_lock(&smu->mutex);
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						  (smu_clk_get_index(smu, clk_select) << 16) | clk_freq);
		mutex_unlock(&smu->mutex);
	}

failed:
	return ret;
}
static int
smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
					  dm_pp_wm_sets_with_clock_ranges_soc15
					  *clock_ranges)
{
	int ret = 0;
	struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
	void *table = watermarks->cpu_addr;

	if (!smu->disable_watermark &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		smu_set_watermarks_table(smu, table, clock_ranges);
		smu->watermarks_bitmap |= WATERMARKS_EXIST;
		smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
	}

	return ret;
}
static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		break;
	case CHIP_NAVI10:
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		mutex_lock(&smu->mutex);
		if (enable)
			ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
		else
			ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
		mutex_unlock(&smu->mutex);
		break;
	default:
		break;
	}

	return ret;
}
static uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
	if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}
static int
smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
{
	int ret = 0;

	if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, start);
	if (ret)
		pr_err("[%s]%s smc FAN CONTROL feature failed!",
		       __func__, (start ? "Start" : "Stop"));

	return ret;
}
static int
smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}
static int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;
	bool stop = 0;

	if (speed > 100)
		speed = 100;

	if (smu_v11_0_smc_fan_control(smu, stop))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 100);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}
static int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;
	bool start = 1;
	bool stop  = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v11_0_set_fan_speed_percent(smu, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v11_0_smc_fan_control(smu, stop);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v11_0_smc_fan_control(smu, start);
		break;
	default:
		break;
	}

	if (ret) {
		pr_err("[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}
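
/*
 * Fan RPM control works by programming a target tachometer period in
 * reference-clock cycles.  The conversion below presumes
 * amdgpu_asic_get_xclk() returns the crystal clock in 10 kHz units, so
 * 60 * xclk * 10000 / (8 * rpm) gives the period expected by
 * CG_TACH_CTRL.TARGET_PERIOD.
 */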
static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
				       uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;
	uint32_t tach_period, crystal_clock_freq;
	bool stop = 0;

	if (!speed)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	ret = smu_v11_0_smc_fan_control(smu, stop);
	if (ret)
		goto set_fan_speed_rpm_failed;

	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);

set_fan_speed_rpm_failed:
	mutex_unlock(&(smu->mutex));
	return ret;
}
#define XGMI_STATE_D0 1
#define XGMI_STATE_D3 0

static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
				     uint32_t pstate)
{
	int ret = 0;

	mutex_lock(&(smu->mutex));
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetXgmiMode,
					  pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
	mutex_unlock(&(smu->mutex));
	return ret;
}
#define THM_11_0__SRCID__THM_DIG_THERM_L2H	0	/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L	1	/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */

static int smu_v11_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		default:
			pr_warn("GPU under temperature range unknown src id (%d), detected on PCIe %d:%d.%d!\n",
				src_id,
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		}
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
{
	.process = smu_v11_0_irq_process,
};

static int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = smu->irq_source;
	int ret = 0;

	/* already registered */
	if (irq_src)
		return 0;

	irq_src = kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!irq_src)
		return -ENOMEM;
	smu->irq_source = irq_src;

	irq_src->funcs = &smu_v11_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	return ret;
}
static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
		struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
			(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
			(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
			(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
			(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
			(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
			(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}
static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
	mutex_unlock(&smu->mutex);

	return ret;
}
static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
	return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
}
static bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	uint32_t val;
	bool baco_support;

	mutex_lock(&smu_baco->mutex);
	baco_support = smu_baco->platform_support;
	mutex_unlock(&smu_baco->mutex);

	if (!baco_support)
		return false;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
	if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
		return true;

	return false;
}
static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	enum smu_baco_state baco_state = SMU_BACO_STATE_EXIT;

	mutex_lock(&smu_baco->mutex);
	baco_state = smu_baco->state;
	mutex_unlock(&smu_baco->mutex);

	return baco_state;
}
static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	int ret = 0;

	if (smu_v11_0_baco_get_state(smu) == state)
		return 0;

	mutex_lock(&smu_baco->mutex);

	if (state == SMU_BACO_STATE_ENTER)
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, BACO_SEQ_BACO);
	else
		ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
	if (ret)
		goto out;

	smu_baco->state = state;
out:
	mutex_unlock(&smu_baco->mutex);
	return ret;
}
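
/*
 * BACO (Bus Active, Chip Off) reset: arm the D3 entry sequence, ask the SMU
 * to enter BACO, give the hardware a moment to settle, then exit BACO again.
 */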
static int smu_v11_0_baco_reset(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
	if (ret)
		return ret;

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
	if (ret)
		return ret;

	msleep(10);

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
	if (ret)
		return ret;

	return ret;
}
static const struct smu_funcs smu_v11_0_funcs = {
	.init_microcode = smu_v11_0_init_microcode,
	.load_microcode = smu_v11_0_load_microcode,
	.check_fw_status = smu_v11_0_check_fw_status,
	.check_fw_version = smu_v11_0_check_fw_version,
	.send_smc_msg = smu_v11_0_send_msg,
	.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
	.read_smc_arg = smu_v11_0_read_arg,
	.setup_pptable = smu_v11_0_setup_pptable,
	.init_smc_tables = smu_v11_0_init_smc_tables,
	.fini_smc_tables = smu_v11_0_fini_smc_tables,
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
	.get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	.check_pptable = smu_v11_0_check_pptable,
	.parse_pptable = smu_v11_0_parse_pptable,
	.populate_smc_pptable = smu_v11_0_populate_smc_pptable,
	.write_pptable = smu_v11_0_write_pptable,
	.write_watermarks_table = smu_v11_0_write_watermarks_table,
	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
	.set_tool_table_location = smu_v11_0_set_tool_table_location,
	.init_display_count = smu_v11_0_init_display_count,
	.set_allowed_mask = smu_v11_0_set_allowed_mask,
	.get_enabled_mask = smu_v11_0_get_enabled_mask,
	.system_features_control = smu_v11_0_system_features_control,
	.update_feature_enable_state = smu_v11_0_update_feature_enable_state,
	.notify_display_change = smu_v11_0_notify_display_change,
	.get_power_limit = smu_v11_0_get_power_limit,
	.set_power_limit = smu_v11_0_set_power_limit,
	.get_current_clk_freq = smu_v11_0_get_current_clk_freq,
	.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
	.start_thermal_control = smu_v11_0_start_thermal_control,
	.read_sensor = smu_v11_0_read_sensor,
	.set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
	.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
	.set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
	.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
	.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
	.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
	.gfx_off_control = smu_v11_0_gfx_off_control,
	.register_irq_handler = smu_v11_0_register_irq_handler,
	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
	.baco_is_support = smu_v11_0_baco_is_support,
	.baco_get_state = smu_v11_0_baco_get_state,
	.baco_set_state = smu_v11_0_baco_set_state,
	.baco_reset = smu_v11_0_baco_reset,
};
void smu_v11_0_set_smu_funcs(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->funcs = &smu_v11_0_funcs;
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		vega20_set_ppt_funcs(smu);
		break;
	case CHIP_NAVI10:
		navi10_set_ppt_funcs(smu);
		break;
	default:
		pr_warn("Unknown asic for smu11\n");
	}
}