/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

#define FIRMWARE_RAVEN		"/*(DEBLOBBED)*/"
#define FIRMWARE_PICASSO	"/*(DEBLOBBED)*/"
#define FIRMWARE_RAVEN2		"/*(DEBLOBBED)*/"
#define FIRMWARE_ARCTURUS	"/*(DEBLOBBED)*/"
#define FIRMWARE_RENOIR		"/*(DEBLOBBED)*/"
#define FIRMWARE_NAVI10		"/*(DEBLOBBED)*/"
#define FIRMWARE_NAVI14		"/*(DEBLOBBED)*/"
#define FIRMWARE_NAVI12		"/*(DEBLOBBED)*/"

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
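
/* Per-ASIC firmware selection plus one-time setup: pick and validate the
 * VCN firmware image, log its version, and allocate the VRAM objects each
 * VCN instance executes from.
 */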
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->pdev->device == 0x15d8)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_ARCTURUS:
		fw_name = FIRMWARE_ARCTURUS;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_RENOIR:
		fw_name = FIRMWARE_RENOIR;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI14:
		fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI12:
		fw_name = FIRMWARE_NAVI12;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = reject_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encode major version and are non-zero in the
	 * new naming convention. In the old convention the same field is part
	 * of the version minor and DRM_DISABLED_FLAG; since the latest version
	 * minor there is 0x5B and DRM_DISABLED_FLAG is zero, the field has
	 * always been zero in old-convention firmware, so these four bits
	 * tell the two conventions apart.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
			version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
					    &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
			return r;
		}

		if (adev->vcn.indirect_sram) {
			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
					AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
					&adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
			if (r) {
				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
				return r;
			}
		}
	}

	return 0;
}
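
/* Inverse of amdgpu_vcn_sw_init(): free the per-instance DPG SRAM and
 * VCPU buffer objects, any suspend snapshot, the dec/enc rings, and the
 * firmware reference.
 */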
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (adev->vcn.indirect_sram) {
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
					      &adev->vcn.inst[j].dpg_sram_gpu_addr,
					      (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
		}
		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
				      &adev->vcn.inst[j].gpu_addr,
				      (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);

	return 0;
}
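
/* Snapshot each instance's VCPU buffer into system memory so that
 * amdgpu_vcn_resume() can restore it once the device powers back up.
 */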
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned int size;
	void *ptr;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			continue;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
	}
	return 0;
}
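
/* Restore each VCPU buffer, either from the suspend snapshot or, when no
 * snapshot exists, by re-copying the firmware image and zeroing the rest.
 */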
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned int size;
	void *ptr;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned int offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}
	}
	return 0;
}
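
/* Idle handler: count the fences still outstanding on every VCN ring.
 * If the block is fully idle, gate its power; otherwise re-arm the work.
 * Under DPG the firmware is also paused/unpaused based on encode load.
 */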
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			struct dpg_pause_state new_state;

			if (fence[j])
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, j, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
		       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}
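
/* Ring begin_use hook: cancel the pending idle work and, if it was no
 * longer pending (the block may already be gated), ungate VCN power.
 * Under DPG, refresh the pause state for the instance about to run.
 */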
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
		       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0;
		unsigned int i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
		}
		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
	}
}
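
/* Ring end_use hook: re-arm the idle handler so power is gated again
 * once the rings drain.
 */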
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
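
/* Ring test for the decode ring: write 0xDEADBEEF to scratch9 through a
 * ring packet and poll the register until the value shows up.
 */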
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned int i;
	int r;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
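
/* Submit a decoder message buffer directly to the ring. The IB loads the
 * BO address into the firmware's data0/data1 registers and pads the rest
 * with NOPs; the BO is fenced, unreserved and unreferenced on all paths.
 */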
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}
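
/* Build a decoder "create session" message for @handle (the 0x780/0x440
 * words read as 1920/1088, a common test stream size) and submit it.
 */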
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}
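
/* Build the matching decoder "destroy session" message for @handle. */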
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}
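
/* IB test for the decode ring: create and destroy a session back to back,
 * then wait on the destroy fence, mapping a timeout to -ETIMEDOUT.
 */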
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}
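
/* Ring test for an encode ring: commit a single END command and poll for
 * the read pointer to advance. Skipped under SR-IOV.
 */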
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
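
/* Emit an encoder "create session" IB: session info pointing at @bo, a
 * task info block, and an initialize op, zero-padded to ib_size_dw.
 */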
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_bo *bo,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
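
/* Emit the matching encoder "close session" IB for @handle. */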
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_bo *bo,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
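
/* IB test for an encode ring: allocate a scratch BO, run the create/close
 * session pair against it, and wait on the final fence.
 */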
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return r;
}