// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/sync_file.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"

/*
 * Cmdstream submission:
 */

/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID    0x8000   /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED   0x4000
#define BO_PINNED   0x2000
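
/* Allocate the submit object, with the bo and cmd tables placed inline
 * after it in a single allocation sized for nr_bos/nr_cmds entries.
 */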
static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu, struct msm_gem_address_space *aspace,
		struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
		uint32_t nr_cmds)
{
	struct msm_gem_submit *submit;
	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
		((u64)nr_cmds * sizeof(submit->cmd[0]));

	if (sz > SIZE_MAX)
		return NULL;

	submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!submit)
		return NULL;

	submit->dev = dev;
	submit->aspace = aspace;
	submit->gpu = gpu;
	submit->fence = NULL;
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->ring = gpu->rb[queue->prio];

	/* initially, until copy_from_user() and bo lookup succeeds: */
	submit->nr_bos = 0;
	submit->nr_cmds = 0;

	INIT_LIST_HEAD(&submit->node);
	INIT_LIST_HEAD(&submit->bo_list);
	ww_acquire_init(&submit->ticket, &reservation_ww_class);

	return submit;
}
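
/* Tear down a submit: drop its fence and submitqueue references, unlink
 * it, and free the allocation.
 */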
void msm_gem_submit_free(struct msm_gem_submit *submit)
{
	dma_fence_put(submit->fence);
	list_del(&submit->node);
	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	kfree(submit);
}
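
/* Copy the bo table from userspace and resolve each GEM handle to its
 * object, taking a reference on each and adding it to the submit's
 * bo_list.  Handles are looked up directly in object_idr under a single
 * table_lock so the bulk lookup stays cheap.
 */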
static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
			ret = -EFAULT;
			i = 0;
			goto out;
		}

/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
			!(submit_bo.flags & MANDATORY_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			i = 0;
			goto out;
		}

		submit->bos[i].handle = submit_bo.handle;
		submit->bos[i].flags = submit_bo.flags;
		/* in validate_objects() we figure out if this is true: */
		submit->bos[i].iova = submit_bo.presumed;
	}

	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct msm_gem_object *msm_obj;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit->bos[i].handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		msm_obj = to_msm_bo(obj);

		if (!list_empty(&msm_obj->submit_entry)) {
			DRM_ERROR("handle %u at index %u already on submit list\n",
					submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = msm_obj;

		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
	}

out_unlock:
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}
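
/* Undo the locking/pinning done for a single bo, used both on the error
 * paths and from submit_cleanup().  With backoff, also clear a presumed
 * iova that was never validated.
 */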
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
		int i, bool backoff)
{
	struct msm_gem_object *msm_obj = submit->bos[i].obj;

	if (submit->bos[i].flags & BO_PINNED)
		msm_gem_unpin_iova(&msm_obj->base, submit->aspace);

	if (submit->bos[i].flags & BO_LOCKED)
		ww_mutex_unlock(&msm_obj->base.resv->lock);

	if (backoff && !(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;

	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
}

/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock,
					&submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i, true);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked, true);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
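
/* For each bo, reserve a shared-fence slot (read-only bos) and, unless
 * MSM_SUBMIT_NO_IMPLICIT was requested, synchronize against the fences
 * already attached to the bo's reservation object.
 */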
static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		if (!write) {
			/* NOTE: _reserve_shared() must happen before
			 * _add_shared_fence(), which makes this a slightly
			 * strange place to call it.  OTOH this is a
			 * convenient can-fail point to hook it in.
			 */
			ret = reservation_object_reserve_shared(msm_obj->base.resv,
								1);
			if (ret)
				return ret;
		}

		if (no_implicit)
			continue;

		ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
			write);
		if (ret)
			break;
	}

	return ret;
}
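
/* Pin each bo in the GPU address space and record whether the iova matches
 * the address userspace presumed; if any differ, relocations will be needed.
 */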
static int submit_pin_objects(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	submit->valid = true;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_and_pin_iova(&msm_obj->base,
				submit->aspace, &iova);

		if (ret)
			break;

		submit->bos[i].flags |= BO_PINNED;

		if (iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = iova;
			/* iova changed, so address in cmdstream is not valid: */
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	return ret;
}
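
/* Bounds-checked accessor for the submit's bo table; any of obj/iova/valid
 * may be NULL if the caller does not need that value.
 */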
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}

/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (!nr_relocs)
		return 0;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc;
		void __user *userptr =
			u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
		uint32_t off;
		uint64_t iova;
		bool valid;

		if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) {
			ret = -EFAULT;
			goto out;
		}

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			goto out;

		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr(&obj->base);

	return ret;
}
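
/* Common cleanup: unlock/unpin each bo, drop its submit_entry linkage and
 * object reference, and release the ww acquire ticket.
 */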
static void submit_cleanup(struct msm_gem_submit *submit)
{
	unsigned i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_unlock_unpin_bo(submit, i, false);
		list_del_init(&msm_obj->submit_entry);
		drm_gem_object_put(&msm_obj->base);
	}

	ww_acquire_fini(&submit->ticket);
}
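
/* The SUBMIT ioctl: validate the userspace args, look up and lock the bo's,
 * pin them, copy in the cmd buffers (applying relocs if any presumed iova
 * was wrong), then hand the submit to the GPU and return a fence (and
 * optionally a sync_file fd) to userspace.
 */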
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit;
	struct msm_gpu *gpu = priv->gpu;
	struct sync_file *sync_file = NULL;
	struct msm_gpu_submitqueue *queue;
	struct msm_ringbuffer *ring;
	int out_fence_fd = -1;
	struct pid *pid = get_pid(task_pid(current));
	unsigned i;
	int ret, submitid;

	if (!gpu)
		return -ENXIO;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return -EINVAL;

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return -EINVAL;

	if (args->flags & MSM_SUBMIT_SUDO) {
		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
		    !capable(CAP_SYS_RAWIO))
			return -EINVAL;
	}

	queue = msm_submitqueue_get(ctx, args->queueid);
	if (!queue)
		return -ENOENT;

	/* Get a unique identifier for the submission for logging purposes */
	submitid = atomic_inc_return(&ident) - 1;

	ring = gpu->rb[queue->prio];
	trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
		args->nr_bos, args->nr_cmds);

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, ring->fctx->context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto out_unlock;
		}
	}

	submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos,
		args->nr_cmds);
	if (!submit) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	submit->pid = pid;
	submit->ident = submitid;

	if (args->flags & MSM_SUBMIT_SUDO)
		submit->in_rb = true;

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
	if (ret)
		goto out;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;
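
	/* Copy and validate each cmd buffer entry from userspace, recording
	 * the GPU address it will execute from:
	 */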
	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
		struct msm_gem_object *msm_obj;
		uint64_t iova;

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_cmd.submit_idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
					submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		if (!submit_cmd.size ||
			((submit_cmd.size + submit_cmd.submit_offset) >
				msm_obj->base.size)) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
		submit->cmd[i].idx  = submit_cmd.submit_idx;

		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
				submit_cmd.nr_relocs, submit_cmd.relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;

	submit->fence = msm_fence_alloc(ring->fctx);
	if (IS_ERR(submit->fence)) {
		ret = PTR_ERR(submit->fence);
		submit->fence = NULL;
		goto out;
	}

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		sync_file = sync_file_create(submit->fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto out;
		}
	}

	msm_gpu_submit(gpu, submit, ctx);

	args->fence = submit->fence->seqno;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		fd_install(out_fence_fd, sync_file->file);
		args->fence_fd = out_fence_fd;
	}

out:
	submit_cleanup(submit);
	if (ret)
		msm_gem_submit_free(submit);

out_unlock:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}