/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED

#include <drm/drm.h>
#include <linux/ioctl.h>

#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 1

struct kfd_ioctl_get_version_args {
	__u32 major_version;	/* from KFD */
	__u32 minor_version;	/* from KFD */
};

/* For kfd_ioctl_create_queue_args.queue_type. */
#define KFD_IOC_QUEUE_TYPE_COMPUTE		0x0
#define KFD_IOC_QUEUE_TYPE_SDMA			0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL		0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI		0x3

#define KFD_MAX_QUEUE_PERCENTAGE	100
#define KFD_MAX_QUEUE_PRIORITY		15

struct kfd_ioctl_create_queue_args {
	__u64 ring_base_address;	/* to KFD */
	__u64 write_pointer_address;	/* from KFD */
	__u64 read_pointer_address;	/* from KFD */
	__u64 doorbell_offset;		/* from KFD */

	__u32 ring_size;		/* to KFD */
	__u32 gpu_id;			/* to KFD */
	__u32 queue_type;		/* to KFD */
	__u32 queue_percentage;		/* to KFD */
	__u32 queue_priority;		/* to KFD */
	__u32 queue_id;			/* from KFD */

	__u64 eop_buffer_address;	/* to KFD */
	__u64 eop_buffer_size;		/* to KFD */
	__u64 ctx_save_restore_address;	/* to KFD */
	__u32 ctx_save_restore_size;	/* to KFD */
	__u32 ctl_stack_size;		/* to KFD */
};

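/*
 * Usage sketch (illustrative only, not part of the ABI): creating a
 * compute queue. The setup is an assumption of this sketch: ring_buf
 * must already point at GPU-accessible memory, gpu_id must come from
 * topology discovery, and kfd_fd is an open /dev/kfd descriptor.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	struct kfd_ioctl_create_queue_args args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.gpu_id = gpu_id;
 *	args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE;
 *	args.queue_percentage = KFD_MAX_QUEUE_PERCENTAGE;
 *	args.queue_priority = 7;	// 0..KFD_MAX_QUEUE_PRIORITY
 *	args.ring_base_address = (uintptr_t)ring_buf;
 *	args.ring_size = ring_bytes;
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &args) == 0) {
 *		// KFD returns queue_id plus read/write pointer and
 *		// doorbell locations for user-mode submission.
 *		queue_id = args.queue_id;
 *	}
 */
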
struct kfd_ioctl_destroy_queue_args {
	__u32 queue_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_update_queue_args {
	__u64 ring_base_address;	/* to KFD */

	__u32 queue_id;			/* to KFD */
	__u32 ring_size;		/* to KFD */
	__u32 queue_percentage;		/* to KFD */
	__u32 queue_priority;		/* to KFD */
};

struct kfd_ioctl_set_cu_mask_args {
	__u32 queue_id;		/* to KFD */
	__u32 num_cu_mask;	/* to KFD */
	__u64 cu_mask_ptr;	/* to KFD */
};

struct kfd_ioctl_get_queue_wave_state_args {
	__u64 ctl_stack_address;	/* to KFD */
	__u32 ctl_stack_used_size;	/* from KFD */
	__u32 save_area_used_size;	/* from KFD */
	__u32 queue_id;			/* to KFD */
	__u32 pad;
};

/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1

struct kfd_ioctl_set_memory_policy_args {
	__u64 alternate_aperture_base;	/* to KFD */
	__u64 alternate_aperture_size;	/* to KFD */

	__u32 gpu_id;			/* to KFD */
	__u32 default_policy;		/* to KFD */
	__u32 alternate_policy;		/* to KFD */
	__u32 pad;
};

/*
 * All counters are monotonic. They are used for profiling of compute jobs.
 * The profiling is done by userspace.
 *
 * In case of GPU reset, the counter should not be affected.
 */

struct kfd_ioctl_get_clock_counters_args {
	__u64 gpu_clock_counter;	/* from KFD */
	__u64 cpu_clock_counter;	/* from KFD */
	__u64 system_clock_counter;	/* from KFD */
	__u64 system_clock_freq;	/* from KFD */

	__u32 gpu_id;			/* to KFD */
	__u32 pad;
};

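/*
 * Usage sketch (illustrative only): sampling the counters from user
 * space, e.g. to correlate GPU and CPU timestamps in a profiler.
 * kfd_fd and gpu_id are assumed inputs.
 *
 *	struct kfd_ioctl_get_clock_counters_args args = { .gpu_id = gpu_id };
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_GET_CLOCK_COUNTERS, &args) == 0) {
 *		// system_clock_freq reports ticks per second, so this
 *		// converts the monotonic system counter to seconds.
 *		double secs = (double)args.system_clock_counter /
 *			      (double)args.system_clock_freq;
 *	}
 */
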
struct kfd_process_device_apertures {
	__u64 lds_base;		/* from KFD */
	__u64 lds_limit;	/* from KFD */
	__u64 scratch_base;	/* from KFD */
	__u64 scratch_limit;	/* from KFD */
	__u64 gpuvm_base;	/* from KFD */
	__u64 gpuvm_limit;	/* from KFD */
	__u32 gpu_id;		/* from KFD */
	__u32 pad;
};

/*
 * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
 * unlimited number of GPUs.
 */
#define NUM_OF_SUPPORTED_GPUS 7

struct kfd_ioctl_get_process_apertures_args {
	struct kfd_process_device_apertures
			process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */

	/* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
	__u32 num_of_nodes;
	__u32 pad;
};

struct kfd_ioctl_get_process_apertures_new_args {
	/* User allocated. Pointer to struct kfd_process_device_apertures
	 * filled in by Kernel
	 */
	__u64 kfd_process_device_apertures_ptr;
	/* to KFD - indicates amount of memory present in
	 * kfd_process_device_apertures_ptr
	 * from KFD - Number of entries filled by KFD.
	 */
	__u32 num_of_nodes;
	__u32 pad;
};

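/*
 * Usage sketch (illustrative only) of the two-call pattern this
 * interface suggests: query the node count, allocate, then fetch.
 * That a call with num_of_nodes == 0 only reports the count, without
 * filling the buffer, is an assumption of this sketch.
 *
 *	struct kfd_ioctl_get_process_apertures_new_args args = {0};
 *	struct kfd_process_device_apertures *ap;
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 *	ap = calloc(args.num_of_nodes, sizeof(*ap));	// <stdlib.h>
 *	args.kfd_process_device_apertures_ptr = (uintptr_t)ap;
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 *	// args.num_of_nodes now holds the number of entries filled in
 */
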
#define MAX_ALLOWED_NUM_POINTS		100
#define MAX_ALLOWED_AW_BUFF_SIZE	4096
#define MAX_ALLOWED_WAC_BUFF_SIZE	128

struct kfd_ioctl_dbg_register_args {
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_dbg_unregister_args {
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_dbg_address_watch_args {
	__u64 content_ptr;		/* a pointer to the actual content */
	__u32 gpu_id;			/* to KFD */
	__u32 buf_size_in_bytes;	/* including gpu_id and buf_size */
};

struct kfd_ioctl_dbg_wave_control_args {
	__u64 content_ptr;		/* a pointer to the actual content */
	__u32 gpu_id;			/* to KFD */
	__u32 buf_size_in_bytes;	/* including gpu_id and buf_size */
};

/* Matching HSA_EVENTTYPE */
#define KFD_IOC_EVENT_SIGNAL			0
#define KFD_IOC_EVENT_NODECHANGE		1
#define KFD_IOC_EVENT_DEVICESTATECHANGE		2
#define KFD_IOC_EVENT_HW_EXCEPTION		3
#define KFD_IOC_EVENT_SYSTEM_EVENT		4
#define KFD_IOC_EVENT_DEBUG_EVENT		5
#define KFD_IOC_EVENT_PROFILE_EVENT		6
#define KFD_IOC_EVENT_QUEUE_EVENT		7
#define KFD_IOC_EVENT_MEMORY			8

#define KFD_IOC_WAIT_RESULT_COMPLETE		0
#define KFD_IOC_WAIT_RESULT_TIMEOUT		1
#define KFD_IOC_WAIT_RESULT_FAIL		2

#define KFD_SIGNAL_EVENT_LIMIT			4096

/* For kfd_event_data.hw_exception_data.reset_type. */
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET	0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET	1

/* For kfd_event_data.hw_exception_data.reset_cause. */
#define KFD_HW_EXCEPTION_GPU_HANG	0
#define KFD_HW_EXCEPTION_ECC		1

/* For kfd_hsa_memory_exception_data.ErrorType */
#define KFD_MEM_ERR_NO_RAS		0
#define KFD_MEM_ERR_SRAM_ECC		1
#define KFD_MEM_ERR_POISON_CONSUMED	2
#define KFD_MEM_ERR_GPU_HANG		3

struct kfd_ioctl_create_event_args {
	__u64 event_page_offset;	/* from KFD */
	__u32 event_trigger_data;	/* from KFD - signal events only */
	__u32 event_type;		/* to KFD */
	__u32 auto_reset;		/* to KFD */
	__u32 node_id;			/* to KFD - only valid for certain
					   event types */
	__u32 event_id;			/* from KFD */
	__u32 event_slot_index;		/* from KFD */
};

struct kfd_ioctl_destroy_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_set_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_reset_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_memory_exception_failure {
	__u32 NotPresent;	/* Page not present or supervisor privilege */
	__u32 ReadOnly;		/* Write access to a read-only page */
	__u32 NoExecute;	/* Execute access to a page marked NX */
	__u32 imprecise;	/* Can't determine the exact fault address */
};

/* memory exception data */
struct kfd_hsa_memory_exception_data {
	struct kfd_memory_exception_failure failure;
	__u64 va;
	__u32 gpu_id;
	__u32 ErrorType; /* 0 = no RAS error,
			  * 1 = SRAM ECC,
			  * 2 = Link_SYNFLOOD (poison),
			  * 3 = GPU hang (not attributable to a specific cause),
			  * other values reserved
			  */
};

/* hw exception data */
struct kfd_hsa_hw_exception_data {
	__u32 reset_type;
	__u32 reset_cause;
	__u32 memory_lost;
	__u32 gpu_id;
};

/* Event data */
struct kfd_event_data {
	union {
		struct kfd_hsa_memory_exception_data memory_exception_data;
		struct kfd_hsa_hw_exception_data hw_exception_data;
	};				/* From KFD */
	__u64 kfd_event_data_ext;	/* pointer to an extension structure
					   for future exception types */
	__u32 event_id;			/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_wait_events_args {
	__u64 events_ptr;	/* points to a struct
				   kfd_event_data array, to KFD */
	__u32 num_events;	/* to KFD */
	__u32 wait_for_all;	/* to KFD */
	__u32 timeout;		/* to KFD */
	__u32 wait_result;	/* from KFD */
};

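/*
 * Usage sketch (illustrative only): creating an auto-reset signal
 * event and blocking on it. The timeout unit is assumed to be
 * milliseconds; error handling is omitted.
 *
 *	struct kfd_ioctl_create_event_args ev = {0};
 *	struct kfd_event_data data = {0};
 *	struct kfd_ioctl_wait_events_args wait = {0};
 *
 *	ev.event_type = KFD_IOC_EVENT_SIGNAL;
 *	ev.auto_reset = 1;
 *	ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &ev);
 *
 *	data.event_id = ev.event_id;
 *	wait.events_ptr = (uintptr_t)&data;
 *	wait.num_events = 1;
 *	wait.wait_for_all = 1;
 *	wait.timeout = 1000;
 *	ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wait);
 *	if (wait.wait_result == KFD_IOC_WAIT_RESULT_TIMEOUT)
 *		handle_timeout();	// hypothetical helper
 */
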
struct kfd_ioctl_set_scratch_backing_va_args {
	__u64 va_addr;	/* to KFD */
	__u32 gpu_id;	/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_get_tile_config_args {
	/* to KFD: pointer to tile array */
	__u64 tile_config_ptr;
	/* to KFD: pointer to macro tile array */
	__u64 macro_tile_config_ptr;
	/* to KFD: array size allocated by user mode
	 * from KFD: array size filled by kernel
	 */
	__u32 num_tile_configs;
	/* to KFD: array size allocated by user mode
	 * from KFD: array size filled by kernel
	 */
	__u32 num_macro_tile_configs;

	__u32 gpu_id;		/* to KFD */
	__u32 gb_addr_config;	/* from KFD */
	__u32 num_banks;	/* from KFD */
	__u32 num_ranks;	/* from KFD */
	/* struct size can be extended later if needed
	 * without breaking ABI compatibility
	 */
};

struct kfd_ioctl_set_trap_handler_args {
	__u64 tba_addr;		/* to KFD */
	__u64 tma_addr;		/* to KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_acquire_vm_args {
	__u32 drm_fd;	/* to KFD */
	__u32 gpu_id;	/* to KFD */
};

/* Allocation flags: memory types */
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM		(1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT		(1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR		(1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL	(1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP	(1 << 4)
/* Allocation flags: attributes/access options */
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE	(1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT	(1 << 26)

/* Allocate memory for later SVM (shared virtual memory) mapping.
 *
 * @va_addr:     virtual address of the memory to be allocated
 *               all later mappings on all GPUs will use this address
 * @size:        size in bytes
 * @handle:      buffer handle returned to user mode, used to refer to
 *               this allocation for mapping, unmapping and freeing
 * @mmap_offset: for CPU-mapping the allocation by mmapping a render node
 *               for userptrs this is overloaded to specify the CPU address
 * @gpu_id:      device identifier
 * @flags:       memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
 */
struct kfd_ioctl_alloc_memory_of_gpu_args {
	__u64 va_addr;		/* to KFD */
	__u64 size;		/* to KFD */
	__u64 handle;		/* from KFD */
	__u64 mmap_offset;	/* to KFD (userptr), from KFD (mmap offset) */
	__u32 gpu_id;		/* to KFD */
	__u32 flags;		/* to KFD */
};

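/*
 * Usage sketch (illustrative only): allocating 64 MiB of VRAM for
 * later SVM mapping. The flag combination is one plausible choice,
 * and va is assumed to come from user mode's VA management.
 *
 *	struct kfd_ioctl_alloc_memory_of_gpu_args alloc = {0};
 *
 *	alloc.va_addr = va;
 *	alloc.size = 64ull << 20;
 *	alloc.gpu_id = gpu_id;
 *	alloc.flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 *		      KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
 *		      KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE;
 *	if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &alloc) == 0) {
 *		// alloc.handle identifies the buffer for map/unmap/free;
 *		// alloc.mmap_offset CPU-maps it through the render node.
 *	}
 */
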
/* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
 *
 * @handle: memory handle returned by alloc
 */
struct kfd_ioctl_free_memory_of_gpu_args {
	__u64 handle;		/* to KFD */
};

/* Map memory to one or more GPUs
 *
 * @handle:                memory handle returned by alloc
 * @device_ids_array_ptr:  array of gpu_ids (__u32 per device)
 * @n_devices:             number of devices in the array
 * @n_success:             number of devices mapped successfully
 *
 * @n_success returns to the caller how many devices from the start of
 * the array have mapped the buffer successfully. It can be passed into
 * a subsequent retry call to skip those devices. For the first call
 * the caller should initialize it to 0.
 *
 * If the ioctl completes with return code 0 (success), n_success ==
 * n_devices.
 */
struct kfd_ioctl_map_memory_to_gpu_args {
	__u64 handle;			/* to KFD */
	__u64 device_ids_array_ptr;	/* to KFD */
	__u32 n_devices;		/* to KFD */
	__u32 n_success;		/* to/from KFD */
};

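/*
 * Usage sketch (illustrative only) of the retry protocol described
 * above: n_success starts at 0 and is carried across retries so
 * devices that already mapped the buffer are skipped.
 *
 *	struct kfd_ioctl_map_memory_to_gpu_args map = {0};
 *	__u32 gpu_ids[2] = { gpu_a, gpu_b };
 *	int r;
 *
 *	map.handle = alloc.handle;
 *	map.device_ids_array_ptr = (uintptr_t)gpu_ids;
 *	map.n_devices = 2;
 *	map.n_success = 0;
 *	do {
 *		r = ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map);
 *	} while (r != 0 && should_retry(errno));	// hypothetical policy
 *	// on success (r == 0), map.n_success == map.n_devices
 */
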
/* Unmap memory from one or more GPUs
 *
 * same arguments as for mapping
 */
struct kfd_ioctl_unmap_memory_from_gpu_args {
	__u64 handle;			/* to KFD */
	__u64 device_ids_array_ptr;	/* to KFD */
	__u32 n_devices;		/* to KFD */
	__u32 n_success;		/* to/from KFD */
};

struct kfd_ioctl_get_dmabuf_info_args {
	__u64 size;		/* from KFD */
	__u64 metadata_ptr;	/* to KFD */
	__u32 metadata_size;	/* to KFD (space allocated by user)
				 * from KFD (actual metadata size)
				 */
	__u32 gpu_id;		/* from KFD */
	__u32 flags;		/* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
	__u32 dmabuf_fd;	/* to KFD */
};

struct kfd_ioctl_import_dmabuf_args {
	__u64 va_addr;		/* to KFD */
	__u64 handle;		/* from KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 dmabuf_fd;	/* to KFD */
};

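/*
 * Usage sketch (illustrative only): importing a dma-buf (assumed here
 * to come from a DRM PRIME export) at a chosen virtual address. The
 * info call reports the size and owning gpu_id up front.
 *
 *	struct kfd_ioctl_get_dmabuf_info_args info = {0};
 *	struct kfd_ioctl_import_dmabuf_args imp = {0};
 *
 *	info.dmabuf_fd = dmabuf_fd;
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_DMABUF_INFO, &info);
 *
 *	imp.va_addr = va;
 *	imp.gpu_id = info.gpu_id;
 *	imp.dmabuf_fd = dmabuf_fd;
 *	ioctl(kfd_fd, AMDKFD_IOC_IMPORT_DMABUF, &imp);
 *	// imp.handle can now be mapped via AMDKFD_IOC_MAP_MEMORY_TO_GPU
 */
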
/* Register offset inside the remapped mmio page
 */
enum kfd_mmio_remap {
	KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
	KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};

#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr)			_IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type)		_IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr, type)		_IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr, type)		_IOWR(AMDKFD_IOCTL_BASE, nr, type)

#define AMDKFD_IOC_GET_VERSION			\
		AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)

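/*
 * Usage sketch (illustrative only): the typical first call a client
 * makes, checking interface compatibility before using anything else.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *
 *	struct kfd_ioctl_get_version_args v = {0};
 *	int kfd_fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *
 *	if (kfd_fd >= 0 &&
 *	    ioctl(kfd_fd, AMDKFD_IOC_GET_VERSION, &v) == 0 &&
 *	    v.major_version == KFD_IOCTL_MAJOR_VERSION) {
 *		// compatible; v.minor_version gates optional features
 *	}
 */
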
#define AMDKFD_IOC_CREATE_QUEUE			\
		AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)

#define AMDKFD_IOC_DESTROY_QUEUE		\
		AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)

#define AMDKFD_IOC_SET_MEMORY_POLICY		\
		AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)

#define AMDKFD_IOC_GET_CLOCK_COUNTERS		\
		AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES	\
		AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)

#define AMDKFD_IOC_UPDATE_QUEUE			\
		AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)

#define AMDKFD_IOC_CREATE_EVENT			\
		AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)

#define AMDKFD_IOC_DESTROY_EVENT		\
		AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)

#define AMDKFD_IOC_SET_EVENT			\
		AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)

#define AMDKFD_IOC_RESET_EVENT			\
		AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)

#define AMDKFD_IOC_WAIT_EVENTS			\
		AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)

#define AMDKFD_IOC_DBG_REGISTER			\
		AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)

#define AMDKFD_IOC_DBG_UNREGISTER		\
		AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)

#define AMDKFD_IOC_DBG_ADDRESS_WATCH		\
		AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)

#define AMDKFD_IOC_DBG_WAVE_CONTROL		\
		AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)

#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA	\
		AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)

#define AMDKFD_IOC_GET_TILE_CONFIG		\
		AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)

#define AMDKFD_IOC_SET_TRAP_HANDLER		\
		AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW	\
		AMDKFD_IOWR(0x14,		\
			struct kfd_ioctl_get_process_apertures_new_args)

#define AMDKFD_IOC_ACQUIRE_VM			\
		AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)

#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU		\
		AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)

#define AMDKFD_IOC_FREE_MEMORY_OF_GPU		\
		AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)

#define AMDKFD_IOC_MAP_MEMORY_TO_GPU		\
		AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)

#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU	\
		AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)

#define AMDKFD_IOC_SET_CU_MASK			\
		AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)

#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE		\
		AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)

#define AMDKFD_IOC_GET_DMABUF_INFO		\
		AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)

#define AMDKFD_IOC_IMPORT_DMABUF		\
		AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)

#define AMDKFD_COMMAND_START		0x01
#define AMDKFD_COMMAND_END		0x1E

#endif /* KFD_IOCTL_H_INCLUDED */