/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v1_0.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c
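
/*
 * The *_INTERNAL_OFFSET values above are the register indices the VCN
 * firmware itself understands; they are emitted into decode ring packets
 * (via the vcn_v2_0 ring emit helpers) rather than used for MMIO access,
 * which goes through the SOC15_REG_OFFSET() offsets instead.
 */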

#define VCN25_MAX_HW_INSTANCES_ARCTURUS			2

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
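
/*
 * Each VCN instance raises its interrupts under its own IH client ID;
 * the client ID carried in an IV entry identifies the instance that fired.
 */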

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * vcn_v2_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = 2;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
		if (adev->asic_type == CHIP_ARCTURUS) {
			u32 harvest;
			int i;

			adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
			for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
				harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
				if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
					adev->vcn.harvest_config |= 1 << i;
			}

			if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						AMDGPU_VCN_HARVEST_VCN1))
				/* both instances are harvested, disable the block */
				return -ENOENT;
		} else
			adev->vcn.num_vcn_inst = 1;

		adev->vcn.num_enc_rings = 2;
	}

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);

		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;
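
		/*
		 * Doorbell layout (one block per instance, starting at
		 * vcn_ring0_1 << 1): on bare metal each instance gets an
		 * 8-slot stride (dec at slot 0, enc rings from slot 2),
		 * while under SR-IOV the stride shrinks to 2 slots
		 * (dec at slot 0, the single enc ring at slot 1).
		 */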
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;

			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));

			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
			if (r)
				return r;
		}
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;

	return 0;
}
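
/*
 * With dynamic power gating (DPG) supported, register programming at start
 * time can be staged through a PSP-managed SRAM image instead of direct
 * MMIO (see vcn_v2_5_start_dpg_mode()), and the pause callback installed
 * above lets the core VCN code pause/unpause the firmware's power-gating
 * state machine around job submission.
 */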

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r;

	if (amdgpu_sriov_vf(adev))
		r = vcn_v2_5_sriov_start(adev);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (amdgpu_sriov_vf(adev)) {
			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
			adev->vcn.inst[j].ring_dec.sched.ready = true;
		} else {
			ring = &adev->vcn.inst[j].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						ring->doorbell_index, j);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
				ring = &adev->vcn.inst[j].ring_enc[i];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ring = &adev->vcn.inst[i].ring_dec;

		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

		ring->sched.ready = false;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->sched.ready = false;
		}
	}

	return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(adev);

	return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
	}
}
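
/*
 * VCPU address-space layout programmed above, within the instance's
 * firmware BO (or the PSP TMR for window 0 when the PSP loads the ucode):
 *
 *   window 0: firmware image  (size bytes)
 *   window 1: stack           (AMDGPU_VCN_STACK_SIZE, at gpu_addr + offset)
 *   window 2: context         (AMDGPU_VCN_CONTEXT_SIZE, after the stack)
 */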

static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
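
/*
 * When 'indirect' is set, WREG32_SOC15_DPG_MODE_2_0() does not touch MMIO;
 * it appends (reg, value) pairs to the instance's dpg_sram image, which
 * vcn_v2_5_start_dpg_mode() later hands to the PSP to program on our behalf.
 */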

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int ret = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
				    (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
					       (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	return 0;
}
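
/*
 * In DPG mode the decode write pointer is mirrored into mmUVD_SCRATCH2
 * (with bit 31 set as a marker) by vcn_v2_5_dec_ring_set_wptr(); the
 * pause handler later uses that shadow to restore mmUVD_RBC_RB_WPTR
 * once the firmware has been paused and its state restored.
 */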

static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return 0;

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
		tmp &= ~0xff;
		WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));

		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}

static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop = 0, size = 0;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_1_init_header *header = NULL;

	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
	size = header->total_size;

	/*
	 * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
	 * memory descriptor location
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/*
	 * 5, kick off the initialization and wait until
	 * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 10;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev,
			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
			data);
		return -EBUSY;
	}

	return 0;
}

static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp, i, rb_bufsz;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	header->version = MMSCH_VERSION;
	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
	init_table += header->total_size;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		header->eng[i].table_offset = header->total_size;
		header->eng[i].init_status = 0;
		header->eng[i].table_size = 0;

		table_size = 0;

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
			size);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->wptr = 0;

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
			upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
			ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;

		/* refine header */
		header->eng[i].table_size = table_size;
		header->total_size += table_size;
	}

	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}
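
/*
 * Under SR-IOV the guest cannot program most VCN registers directly; the
 * table built above mirrors the register writes vcn_v2_5_start() would do
 * on bare metal, and the host's MMSCH replays it on the guest's behalf.
 */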

static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	int ret_code = 0;
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				   int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;
			SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);

				/* Restore */
				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));

				WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
					RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);

				SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}
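
/*
 * Pausing the DPG state machine keeps the power-gated firmware from
 * cycling while the encode rings are (re)programmed and used; the restore
 * step above re-seeds both encode rings and the decode write pointer
 * before control is handed back.
 */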

/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
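
/*
 * Both ring-func tables reuse the vcn_v2_0 packet emitters: the ring
 * protocol is unchanged from VCN 2.0, so only the pointer accessors
 * (which must select the right instance via ring->me) are 2.5-specific.
 */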

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}

static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE, ret);
		if (ret)
			return ret;
	}

	return ret;
}

static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_VCN,
		.major = 2,
		.minor = 5,
		.rev = 0,
		.funcs = &vcn_v2_5_ip_funcs,
};