/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

#define FIRMWARE_RAVEN		"/*(DEBLOBBED)*/"
#define FIRMWARE_PICASSO	"/*(DEBLOBBED)*/"
#define FIRMWARE_RAVEN2		"/*(DEBLOBBED)*/"
#define FIRMWARE_NAVI10		"/*(DEBLOBBED)*/"

/*(DEBLOBBED)*/

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
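
/* One-time software-side setup: pick the firmware image for this ASIC,
 * validate it, log its version, and allocate the VCPU buffer object that
 * backs the VCN firmware, stack and context (plus a small scratch BO when
 * indirect-SRAM/DPG mode is in use).
 */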
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->pdev->device == 0x15d8)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = reject_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bit 20-23, it is encode major and non-zero for new naming convention.
	 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
	 * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
	 * is zero in old naming convention, this field is always zero so far.
	 * These four bits are used to tell which naming convention is present.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
			 enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
			 version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	if (adev->vcn.indirect_sram) {
		r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo,
					    &adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r);
			return r;
		}
	}

	return 0;
}
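
/* Tear down everything amdgpu_vcn_sw_init() and the ring setup created. */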
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kvfree(adev->vcn.saved_bo);

	if (adev->vcn.indirect_sram) {
		amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
				      &adev->vcn.dpg_sram_gpu_addr,
				      (void **)&adev->vcn.dpg_sram_cpu_addr);
	}

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	amdgpu_ring_fini(&adev->vcn.ring_jpeg);

	release_firmware(adev->vcn.fw);

	return 0;
}
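
/* Snapshot the VCPU BO to system memory before suspend, since its VRAM
 * contents are not preserved while the device is powered down.
 */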
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}
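
/* Restore the VCPU BO on resume: either from the snapshot taken at suspend
 * time or, failing that, by re-copying the raw firmware image and zeroing
 * the rest.
 */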
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kvfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}
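
/* Delayed-work handler: once no fences are outstanding on any VCN ring,
 * gate power to the block; otherwise re-arm the one-second timer.
 */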
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0;
	unsigned int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}

	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}
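
/* Called before each submission: cancel any pending idle work, power the
 * block back up if it had been gated, and recompute the DPG pause state
 * for the ring about to be used.
 */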
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0;
		unsigned int i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
		}
		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}
}
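
/* Called after each submission: start the idle countdown. */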
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
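
/* Smoke-test the decode ring by writing a magic value through the ring to
 * the scratch9 register and polling for it to appear.
 */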
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
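
/* Submit a decoder message BO directly to the ring: point data0/data1 at
 * the BO's GPU address, issue the command, and pad the IB with NOPs.
 */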
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}
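
/* Build a minimal decoder "create session" message for the given handle. */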
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}
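
/* Build the matching decoder "destroy session" message. */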
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}
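
/* Full IB test for the decode ring: create and destroy a session, then
 * wait on the resulting fence.
 */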
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}
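
/* Smoke-test an encode ring: commit an END command and wait for the read
 * pointer to move.
 */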
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
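
/* Build and submit an encoder "open session" IB (session info, task info,
 * op initialize) referencing the caller's buffer object.
 */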
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_bo *bo,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
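
/* Build and submit the matching encoder "close session" IB. */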
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_bo *bo,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
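
/* Full IB test for an encode ring: open and close a session against a
 * scratch BO and wait on the fence.
 */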
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}
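
/* Smoke-test the JPEG ring by writing a magic value to the pitch register
 * through the ring and polling for it.
 */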
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.external.jpeg_pitch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.jpeg_pitch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
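
/* Write a known value to the JPEG pitch register from an IB, padding the
 * remainder with type-6 NOP packets.
 */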
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
		struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	const unsigned ib_size_dw = 16;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	for (i = 2; i < 16; i += 2) {
		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
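
/* Full IB test for the JPEG ring: submit the register write, wait on its
 * fence, then poll the register for the expected value.
 */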
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	struct dma_fence *fence = NULL;
	long r = 0;

	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto error;
	} else if (r < 0) {
		goto error;
	} else {
		r = 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.jpeg_pitch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	dma_fence_put(fence);
error:
	return r;
}