// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

#include <linux/mm_types.h>

#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>

#include "uapi/drm/v3d_drm.h"

#define GMP_GRANULARITY (128 * 1024)

/* Enum for each of the V3D queues. */
enum v3d_queue {
	V3D_BIN,
	V3D_RENDER,
	V3D_TFU,
	V3D_CSD,
	V3D_CACHE_CLEAN,
};

#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1)

struct v3d_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;
};

struct v3d_dev {
	struct drm_device drm;

	/* Short representation (e.g. 33, 41) of the V3D tech version
	 * and revision.
	 */
	int ver;
	bool single_irq_line;

	struct platform_device *pdev;
	void __iomem *hub_regs;
	void __iomem *core_regs[3];
	void __iomem *bridge_regs;
	void __iomem *gca_regs;
	struct clk *clk;
	struct reset_control *reset;

	/* Virtual and DMA addresses of the single shared page table. */
	volatile u32 *pt;
	dma_addr_t pt_paddr;

	/* Virtual and DMA addresses of the MMU's scratch page. When
	 * a read or write is invalid in the MMU, it will be
	 * redirected here.
	 */
	void *mmu_scratch;
	dma_addr_t mmu_scratch_paddr;
	/* virtual address bits from V3D to the MMU. */
	int va_width;

	/* Number of V3D cores. */
	u32 cores;

	/* Allocator managing the address space. All units are in
	 * number of pages.
	 */
	struct drm_mm mm;
	spinlock_t mm_lock;

	struct work_struct overflow_mem_work;

	struct v3d_bin_job *bin_job;
	struct v3d_render_job *render_job;
	struct v3d_tfu_job *tfu_job;
	struct v3d_csd_job *csd_job;

	struct v3d_queue_state queue[V3D_MAX_QUEUES];

	/* Spinlock used to synchronize the overflow memory
	 * management against bin job submission.
	 */
	spinlock_t job_lock;

	/* Protects bo_stats */
	struct mutex bo_lock;

	/* Lock taken when resetting the GPU, to keep multiple
	 * processes from trying to park the scheduler threads and
	 * reset at once.
	 */
	struct mutex reset_lock;

	/* Lock taken when creating and pushing the GPU scheduler
	 * jobs, to keep the sched-fence seqnos in order.
	 */
	struct mutex sched_lock;

	/* Lock taken during a cache clean and when initiating an L2
	 * flush, to keep L2 flushes from interfering with the
	 * synchronous L2 cleans.
	 */
	struct mutex cache_clean_lock;

	/* BO allocation stats, protected by bo_lock above. */
	struct {
		u32 num_allocated;
		u32 pages_allocated;
	} bo_stats;
};

static inline struct v3d_dev *
to_v3d_dev(struct drm_device *dev)
{
	return (struct v3d_dev *)dev->dev_private;
}

static inline bool
v3d_has_csd(struct v3d_dev *v3d)
{
	return v3d->ver >= 41;
}

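/* A sketch of how this check is typically consumed at submit time
 * (illustrative only, not code from this header):
 *
 *	if (!v3d_has_csd(v3d))
 *		return -EINVAL;
 */
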
/* The per-fd struct, which tracks the MMU mappings. */
struct v3d_file_priv {
	struct v3d_dev *v3d;

	struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
};

struct v3d_bo {
	struct drm_gem_shmem_object base;

	struct drm_mm_node node;

	/* List entry for the BO's position in
	 * v3d_render_job->unref_list
	 */
	struct list_head unref_head;
};

static inline struct v3d_bo *
to_v3d_bo(struct drm_gem_object *bo)
{
	return (struct v3d_bo *)bo;
}

struct v3d_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* v3d seqno for signaled() test */
	u64 seqno;
	enum v3d_queue queue;
};

static inline struct v3d_fence *
to_v3d_fence(struct dma_fence *fence)
{
	return (struct v3d_fence *)fence;
}

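/* The to_v3d_bo() and to_v3d_fence() casts above are only valid
 * because the wrapped object (struct drm_gem_shmem_object, struct
 * dma_fence) is the first member of its wrapper struct, so both
 * pointers share the same address.
 */
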
#define V3D_READ(offset) readl(v3d->hub_regs + offset)
#define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset)

#define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset)
#define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset)

#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)

#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)

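/* Note that these accessors are not hygienic macros: they expect a
 * local variable named "v3d" (a struct v3d_dev *) to be in scope at
 * the call site.
 */
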
struct v3d_job {
	struct drm_sched_job base;

	struct kref refcount;

	struct v3d_dev *v3d;

	/* This is the array of BOs that were looked up at the start
	 * of submission.
	 */
	struct drm_gem_object **bo;
	u32 bo_count;

	/* Array of struct dma_fence * to block on before submitting this job.
	 */
	struct xarray deps;
	unsigned long last_dep;

	/* v3d fence to be signaled by IRQ handler when the job is complete. */
	struct dma_fence *irq_fence;

	/* scheduler fence for when the job is considered complete and
	 * the BO reservations can be released.
	 */
	struct dma_fence *done_fence;

	/* Callback for the freeing of the job on refcount going to 0. */
	void (*free)(struct kref *ref);
};

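/* Each job type below embeds struct v3d_job as its first member, so
 * scheduler and IRQ code can recover the specific job from the
 * embedded drm_sched_job with container_of().
 */
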
struct v3d_bin_job {
	struct v3d_job base;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;

	u32 timedout_ctca, timedout_ctra;

	/* Corresponding render job, for attaching our overflow memory. */
	struct v3d_render_job *render;

	/* Submitted tile memory allocation start/size, tile state. */
	u32 qma, qms, qts;
};

struct v3d_render_job {
	struct v3d_job base;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;

	u32 timedout_ctca, timedout_ctra;

	/* List of overflow BOs used in the job that need to be
	 * released once the job is complete.
	 */
	struct list_head unref_list;
};

struct v3d_tfu_job {
	struct v3d_job base;

	struct drm_v3d_submit_tfu args;
};

struct v3d_csd_job {
	struct v3d_job base;

	u32 timedout_batches;

	struct drm_v3d_submit_csd args;
};

/**
 * wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define wait_for(COND, MS) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
	int ret__ = 0; \
	while (!(COND)) { \
		if (time_after(jiffies, timeout__)) { \
			if (!(COND)) \
				ret__ = -ETIMEDOUT; \
			break; \
		} \
		msleep(1); \
	} \
	ret__; \
})

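/* A minimal sketch of the intended pattern, pairing wait_for() with
 * the register accessors above (the register and bit names here are
 * hypothetical, not defined by this header):
 *
 *	if (wait_for(V3D_READ(SOME_STATUS_REG) & SOME_IDLE_BIT, 100))
 *		DRM_ERROR("timed out waiting for GPU idle\n");
 */
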
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

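/* A sketch of the intended use: clamping a userspace-supplied timeout
 * in nanoseconds before a jiffies-based fence wait ("timeout_ns" is an
 * illustrative name, not a field defined in this header):
 *
 *	ret = dma_fence_wait_timeout(fence, true,
 *				     nsecs_to_jiffies_timeout(timeout_ns));
 */
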
/* v3d_bo.c */
struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t size);
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);

/* v3d_debugfs.c */
int v3d_debugfs_init(struct drm_minor *minor);

/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);

/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void v3d_job_put(struct v3d_job *job);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_clean_caches(struct v3d_dev *v3d);

/* v3d_irq.c */
int v3d_irq_init(struct v3d_dev *v3d);
void v3d_irq_enable(struct v3d_dev *v3d);
void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);

/* v3d_mmu.c */
int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo,
		       u32 *offset);
int v3d_mmu_set_page_table(struct v3d_dev *v3d);
void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);

/* v3d_sched.c */
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);