/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "kfd2kgd: " fmt

#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <linux/mmu_context.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"
#include "soc15_hw_ip.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_enum.h"
#include "athub/athub_2_0_0_offset.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"
#include "soc15_common.h"
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES,
	SAVE_WAVES
};

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config,
		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
		uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);
static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout);
static uint32_t get_watch_base_addr(struct amdgpu_device *adev);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);

/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
					struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
#if 0
/* TODO - confirm REG_GET_FIELD x2, should be OK as is... but
 * MC_ARB_RAMCFG register doesn't exist on Vega10 - initial amdgpu
 * changes commented out related code, doing the same here for now but
 * need to sync with Ken et al
 */
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
					MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
					MC_ARB_RAMCFG, NOOFRANKS);
#endif

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

static const struct kfd2kgd_calls kfd2kgd = {
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid =
			get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid =
			get_atc_vmid_pasid_mapping_valid,
	.invalidate_tlbs = invalidate_tlbs,
	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
	.get_tile_config = amdgpu_amdkfd_get_tile_config,
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	mutex_lock(&adev->srbm_mutex);
	nv_grbm_select(adev, mec, pipe, queue, vmid);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}
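
/* Worked example (illustrative only; num_pipe_per_mec is read from the
 * device config rather than assumed): with 4 pipes per MEC, the flat
 * pipe_id 5 decomposes to MEC 2 (5 / 4 + 1) and pipe 1 (5 % 4). The +1
 * skips ME 0, which is the graphics engine.
 */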

static uint32_t get_queue_mask(struct amdgpu_device *adev,
			       uint32_t pipe_id, uint32_t queue_id)
{
	unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
			    queue_id) & 31;

	return ((uint32_t)1) << bit;
}
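
/* Worked example (illustrative only; num_queue_per_pipe comes from the
 * device config, 8 being typical): pipe 1, queue 2 selects bit
 * 1 * 8 + 2 = 10, i.e. a mask of 0x400. The "& 31" wraps the bit index
 * into the 32-bit CP_PQ_WPTR_POLL_CNTL1 register.
 */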

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
	/* APE1 no longer exists on GFX9 and later ASICs */

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid,
		 pasid_mapping);

	/*
	 * Need to do this twice, once for gfx and once for mmhub.
	 * For ATC, add 16 to VMID for mmhub; for IH, different registers.
	 * ATC_VMID0..15 registers are separate from ATC_VMID16..31.
	 */
	pr_debug("ATHUB, reg %x\n",
		 SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
	       pasid_mapping);

#if 0
	/* TODO: uncomment this code when the hardware support is ready. */
	while (!(RREG32(SOC15_REG_OFFSET(
				ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << vmid)))
		cpu_relax();

	pr_debug("ATHUB mapping update finished\n");
	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << vmid);
#endif

	/* Map vmid to pasid also for the IH block */
	pr_debug("update mapping for IH block and mmhub\n");
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
	       pasid_mapping);

	return 0;
}
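
/* Worked example (illustrative only): mapping pasid 0x42 to vmid 8
 * writes (0x42 | VALID) to both mmATC_VMID0_PASID_MAPPING + 8 and
 * mmIH_VMID_0_LUT + 8, so the ATC and the IH block agree on the
 * vmid-to-pasid translation.
 */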

/* TODO - RING0 form of field is obsolete, seems to date back to SI
 * but still works
 */
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t base[2] = {
		SOC15_REG_OFFSET(SDMA0, 0,
				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
		/* On gfx10, mmSDMA1_xxx registers are defined NOT based
		 * on SDMA1 base address (dw 0x1860) but based on SDMA0
		 * base address (dw 0x1260). Therefore use mmSDMA0_RLC0_RB_CNTL
		 * instead of mmSDMA1_RLC0_RB_CNTL for the base address calc.
		 */
		SOC15_REG_OFFSET(SDMA1, 0,
				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
	};
	uint32_t retval;

	retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
					       mmSDMA0_RLC0_RB_CNTL);

	pr_debug("sdma base address: 0x%x\n", retval);

	return retval;
}

static uint32_t get_watch_base_addr(struct amdgpu_device *adev)
{
	uint32_t retval = SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) -
			mmTCP_WATCH0_ADDR_H;

	pr_debug("kfd: reg watch base address: 0x%x\n", retval);

	return retval;
}

static inline struct v10_compute_mqd *get_mqd(void *mqd)
{
	return (struct v10_compute_mqd *)mqd;
}

static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v10_sdma_mqd *)mqd;
}

static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_compute_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, hqd_base, data;

	m = get_mqd(mqd);

	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;
	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);

	for (reg = hqd_base;
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		WREG32(reg, mqd_hqd[reg - hqd_base]);

	/* Activate doorbell logic before triggering WPTR poll. */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);

	if (wptr) {
		/* Don't read wptr with get_user because the user
		 * context may not be accessible (if this function
		 * runs in a work queue). Instead trigger a one-shot
		 * polling read from memory in the CP. This assumes
		 * that wptr is GPU-accessible in the queue's VMID via
		 * ATC or SVM. WPTR==RPTR before starting the poll so
		 * the CP starts fetching new commands from the right
		 * place.
		 *
		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
		 * tricky. Assume that the queue didn't overflow. The
		 * number of valid bits in the 32-bit RPTR depends on
		 * the queue size. The remaining bits are taken from
		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
		 * queue size.
		 */
		uint32_t queue_size =
			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);

		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
			guessed_wptr += queue_size;
		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
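
		/* Worked example (illustrative numbers only): with
		 * queue_size = 0x100 dwords, saved wptr_lo = 0x320 and
		 * rptr = 0x15, the low bits of wptr (0x20) are not
		 * below 0x15, so no wrap is assumed and the guess is
		 * 0x15 + 0x300 = 0x315.
		 */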

		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
		       lower_32_bits(guessed_wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
		       upper_32_bits(guessed_wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
		       lower_32_bits((uint64_t)wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
		       upper_32_bits((uint64_t)wptr));
		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
			 get_queue_mask(adev, pipe_id, queue_id));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
		       get_queue_mask(adev, pipe_id, queue_id));
	}

	/* Start the EOP fetcher */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);

	release_queue(kgd);

	return 0;
}

static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc(HQD_N_REGS * 2 * sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);
	pr_debug("sdma load base addr %x for engine %d, queue %d\n",
		 sdma_base_addr, m->sdma_engine_id, m->sdma_queue_id);
	sdmax_gfx_context_cntl = m->sdma_engine_id ?
		SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
		SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}
	data = RREG32(sdmax_gfx_context_cntl);
	data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
			     RESUME_CTX, 0);
	WREG32(sdmax_gfx_context_cntl, data);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
	       m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
	       m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
	       m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
	       m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	pr_debug("sdma dump engine id %d queue_id %d\n", engine_id, queue_id);
	pr_debug("sdma base addr %x\n", sdma_base_addr);

	*dump = kmalloc(HQD_N_REGS * 2 * sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_base_addr + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
		    high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	enum hqd_dequeue_request_type type;
	unsigned long end_jiffies;
	uint32_t temp;
	struct v10_compute_mqd *m = get_mqd(mqd);

#if 0
	unsigned long flags;
	int retry;
#endif

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

#if 0 /* Is this still needed? */
	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();
#endif

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	uint32_t req = (1 << vmid) |
		(0 << GCVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT) | /* legacy */
		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK |
		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK |
		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK |
		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK |
		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK;

	mutex_lock(&adev->srbm_mutex);

	/* Use light weight invalidation.
	 *
	 * TODO 1: agree on the right set of invalidation registers for
	 * KFD use. Use the last one for now. Invalidate only GCHUB as
	 * SDMA is now moved to GCHUB.
	 *
	 * TODO 2: support range-based invalidation, requires kfd2kgd
	 * interface change.
	 */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32),
	       0xffffffff);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32),
	       0x0000001f);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ), req);

	while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ACK)) &
		 (1 << vmid)))
		cpu_relax();

	mutex_unlock(&adev->srbm_mutex);
}

static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
{
	signed long r;
	uint32_t seq;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;

	spin_lock(&adev->gfx.kiq.ring_lock);
	amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package */
	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(ring,
			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			PACKET3_INVALIDATE_TLBS_PASID(pasid));
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);

	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
	if (r < 1) {
		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
		return -ETIME;
	}

	return 0;
}

static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	int vmid;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;

	if (amdgpu_emu_mode == 0 && ring->sched.ready)
		return invalidate_tlbs_with_kiq(adev, pasid);

	for (vmid = 0; vmid < 16; vmid++) {
		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
			continue;
		if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
			if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
					== pasid) {
				write_vmid_invalidate_request(kgd, vmid);
				break;
			}
		}
	}

	return 0;
}

static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("non kfd vmid %d\n", vmid);
		return 0;
	}

	write_vmid_invalidate_request(kgd, vmid);
	return 0;
}

static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SA_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint64_t base = page_table_base | AMDGPU_PTE_VALID;

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID %u\n",
		       vmid);
		return;
	}

	/* TODO: take advantage of per-process address space size. For
	 * now, all processes share the same address space size, like
	 * on GFX8 and older.
	 */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
			lower_32_bits(adev->vm_manager.max_pfn - 1));
	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
			upper_32_bits(adev->vm_manager.max_pfn - 1));

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
}
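
/* Worked example (illustrative only, assuming the GCVM context registers
 * are laid out as contiguous LO32/HI32 pairs, which is what the (vmid*2)
 * stride relies on): for vmid 8, the PAGE_TABLE_BASE_ADDR_LO32 write
 * above lands 16 dwords past the CONTEXT0 register, i.e. at CONTEXT8's
 * slot.
 */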