// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
        } while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED            (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING           (1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT             (1 << 2)

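/*
 * Job lifecycle (summary of the code below): __v4l2_m2m_try_queue() sets
 * TRANS_QUEUED when a context is added to job_queue, v4l2_m2m_try_run()
 * adds TRANS_RUNNING when that context becomes the current job, and
 * v4l2_m2m_job_finish() clears both. TRANS_ABORT is set by
 * v4l2_m2m_cancel_job() and blocks __v4l2_m2m_try_queue() from re-queueing
 * the context; it is cleared when v4l2_m2m_streamoff() resets job_flags.
 */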

/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE      (1 << 30)
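
/*
 * Example: a CAPTURE buffer whose vb2 mmap offset is 0x1000 is reported to
 * userspace by v4l2_m2m_querybuf() below as 0x1000 + DST_QUEUE_OFF_BASE =
 * 0x40001000; when that offset comes back through mmap(), v4l2_m2m_mmap()
 * sees it is >= the base, selects the destination queue and subtracts the
 * base again.
 */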

enum v4l2_m2m_entity_type {
        MEM2MEM_ENT_TYPE_SOURCE,
        MEM2MEM_ENT_TYPE_SINK,
        MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
        "source",
        "sink",
        "proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:             &struct media_entity pointer with the source entity
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @source_pad:         &struct media_pad with the source pad.
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @sink:               &struct media_entity with the sink entity
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @sink_pad:           &struct media_pad with the sink pad.
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @proc:               &struct media_entity with the M2M device itself.
 * @proc_pads:          &struct media_pad with the @proc pads.
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @intf_devnode:       &struct media_intf devnode pointer with the interface
 *                      which controls the M2M device.
 * @curr_ctx:           currently running instance
 * @job_queue:          instances queued to run
 * @job_spinlock:       protects job_queue
 * @job_work:           worker to run queued jobs.
 * @m2m_ops:            driver callbacks
 */
struct v4l2_m2m_dev {
        struct v4l2_m2m_ctx     *curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
        struct media_entity     *source;
        struct media_pad        source_pad;
        struct media_entity     sink;
        struct media_pad        sink_pad;
        struct media_entity     proc;
        struct media_pad        proc_pads[2];
        struct media_intf_devnode *intf_devnode;
#endif

        struct list_head        job_queue;
        spinlock_t              job_spinlock;
        struct work_struct      job_work;

        const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
                                                enum v4l2_buf_type type)
{
        if (V4L2_TYPE_IS_OUTPUT(type))
                return &m2m_ctx->out_q_ctx;
        else
                return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_queue_ctx *q_ctx;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        if (!q_ctx)
                return NULL;

        return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }
        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
                                struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
        struct v4l2_m2m_buffer *b, *tmp;
        struct vb2_v4l2_buffer *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
                if (b->vb.vb2_buf.index == idx) {
                        list_del(&b->list);
                        q_ctx->num_rdy--;
                        ret = &b->vb;
                        break;
                }
        }
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx)
                ret = m2m_dev->curr_ctx->priv;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Another instance is running, won't run now\n");
                return;
        }

        if (list_empty(&m2m_dev->job_queue)) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("No job pending\n");
                return;
        }

        m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
                                   struct v4l2_m2m_ctx, queue);
        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
                                 struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags_job, flags_out, flags_cap;

        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

        if (!m2m_ctx->out_q_ctx.q.streaming
            || !m2m_ctx->cap_q_ctx.q.streaming) {
                dprintk("Streaming needs to be on for both queues\n");
                return;
        }

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

        /* If the context is aborted then don't schedule it */
        if (m2m_ctx->job_flags & TRANS_ABORT) {
                dprintk("Aborted context\n");
                goto job_unlock;
        }

        if (m2m_ctx->job_flags & TRANS_QUEUED) {
                dprintk("On job queue already\n");
                goto job_unlock;
        }

        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
            && !m2m_ctx->out_q_ctx.buffered) {
                dprintk("No input buffers available\n");
                goto out_unlock;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
            && !m2m_ctx->cap_q_ctx.buffered) {
                dprintk("No output buffers available\n");
                goto cap_unlock;
        }
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

        if (m2m_dev->m2m_ops->job_ready
                && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                dprintk("Driver not ready\n");
                goto job_unlock;
        }

        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
        m2m_ctx->job_flags |= TRANS_QUEUED;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
        return;

cap_unlock:
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
out_unlock:
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
job_unlock:
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}

/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

        __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
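
/*
 * Within this file, v4l2_m2m_try_schedule() is called from v4l2_m2m_qbuf(),
 * v4l2_m2m_streamon() and v4l2_m2m_request_queue(); completed jobs re-enter
 * the scheduler through v4l2_m2m_job_finish(), which schedules job_work to
 * call v4l2_m2m_try_run() from process context.
 */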

/**
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
        struct v4l2_m2m_dev *m2m_dev =
                container_of(work, struct v4l2_m2m_dev, job_work);

        v4l2_m2m_try_run(m2m_dev);
}

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * Called on streamoff or release of any context:
 * 1] If the context is currently running, the driver's .job_abort() callback
 *    is invoked and we wait for the job to complete.
 * 2] If the context is queued, it is removed from the job_queue.
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

        m2m_ctx->job_flags |= TRANS_ABORT;
        if (m2m_ctx->job_flags & TRANS_RUNNING) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                if (m2m_dev->m2m_ops->job_abort)
                        m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
                wait_event(m2m_ctx->finished,
                                !(m2m_ctx->job_flags & TRANS_RUNNING));
        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                list_del(&m2m_ctx->queue);
                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("m2m_ctx: %p had been on queue and was removed\n",
                        m2m_ctx);
        } else {
                /* Do nothing, was not on queue/running */
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
        }
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Called by an instance not currently running\n");
                return;
        }

        list_del(&m2m_dev->curr_ctx->queue);
        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
        wake_up(&m2m_dev->curr_ctx->finished);
        m2m_dev->curr_ctx = NULL;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        /* This instance might have more buffers ready, but since we do not
         * allow more than one job on the job_queue per instance, each has
         * to be scheduled separately after the previous one finishes. */
        __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

        /* We might be running in atomic context,
         * but the job must be run in non-atomic context.
         */
        schedule_work(&m2m_dev->job_work);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
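
/*
 * A typical completion path in a driver (hypothetical sketch; the "foo"
 * names are placeholders, not part of this framework): the interrupt
 * handler removes the processed buffers, marks them done, then calls
 * v4l2_m2m_job_finish() so the next queued job can be scheduled.
 *
 *	static irqreturn_t foo_irq(int irq, void *priv)
 *	{
 *		struct foo_dev *foo = priv;
 *		struct v4l2_m2m_ctx *m2m_ctx = foo->curr_ctx;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(m2m_ctx);
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(foo->m2m_dev, m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */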

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
        ret = vb2_reqbufs(vq, reqbufs);
        /* If count == 0, then the owner has released all buffers and is no
           longer the owner of the queue. Otherwise we have an owner. */
        if (ret == 0)
                vq->owner = reqbufs->count ? file->private_data : NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret = 0;
        unsigned int i;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_querybuf(vq, buf);

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
                        for (i = 0; i < buf->length; ++i)
                                buf->m.planes[i].m.mem_offset
                                        += DST_QUEUE_OFF_BASE;
                } else {
                        buf->m.offset += DST_QUEUE_OFF_BASE;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf)
{
        struct video_device *vdev = video_devdata(file);
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
            (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
                /* dprintk() already prefixes the function name */
                dprintk("requests cannot be used with capture buffers\n");
                return -EPERM;
        }
        ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
        if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_buffer *buf)
{
        struct video_device *vdev = video_devdata(file);
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_create_buffers *create)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
        return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_exportbuffer *eb)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
        return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        ret = vb2_streamon(vq, type);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_dev *m2m_dev;
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags_job, flags;
        int ret;

        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        q_ctx = get_queue_ctx(m2m_ctx, type);
        ret = vb2_streamoff(&q_ctx->q, type);
        if (ret)
                return ret;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        /* We should not be scheduled anymore, since we're dropping a queue. */
        if (m2m_ctx->job_flags & TRANS_QUEUED)
                list_del(&m2m_ctx->queue);
        m2m_ctx->job_flags = 0;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        /* Drop queue, since streamoff returns device to the same state as after
         * calling reqbufs. */
        INIT_LIST_HEAD(&q_ctx->rdy_queue);
        q_ctx->num_rdy = 0;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        if (m2m_dev->curr_ctx == m2m_ctx) {
                m2m_dev->curr_ctx = NULL;
                wake_up(&m2m_ctx->finished);
        }
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

static __poll_t v4l2_m2m_poll_for_data(struct file *file,
                                       struct v4l2_m2m_ctx *m2m_ctx,
                                       struct poll_table_struct *wait)
{
        struct vb2_queue *src_q, *dst_q;
        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
        __poll_t rc = 0;
        unsigned long flags;

        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

        poll_wait(file, &src_q->done_wq, wait);
        poll_wait(file, &dst_q->done_wq, wait);

        /*
         * There has to be at least one buffer queued on each queued_list:
         * such a buffer is either already in the driver, or is waiting for
         * the driver to claim it and start processing.
         */
        if ((!src_q->streaming || src_q->error ||
             list_empty(&src_q->queued_list)) &&
            (!dst_q->streaming || dst_q->error ||
             list_empty(&dst_q->queued_list)))
                return EPOLLERR;

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (list_empty(&dst_q->done_list)) {
                /*
                 * If the last buffer was dequeued from the capture queue,
                 * return immediately. DQBUF will return -EPIPE.
                 */
                if (dst_q->last_buffer_dequeued) {
                        spin_unlock_irqrestore(&dst_q->done_lock, flags);
                        return EPOLLIN | EPOLLRDNORM;
                }
        }
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                        || src_vb->state == VB2_BUF_STATE_ERROR))
                rc |= EPOLLOUT | EPOLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                        || dst_vb->state == VB2_BUF_STATE_ERROR))
                rc |= EPOLLIN | EPOLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

        return rc;
}

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       struct poll_table_struct *wait)
{
        struct video_device *vfd = video_devdata(file);
        __poll_t req_events = poll_requested_events(wait);
        __poll_t rc = 0;

        if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
                rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);

        if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
                struct v4l2_fh *fh = file->private_data;

                poll_wait(file, &fh->wait, wait);
                if (v4l2_event_pending(fh))
                        rc |= EPOLLPRI;
        }

        return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        struct vb2_queue *vq;

        if (offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_src_vq(m2m_ctx);
        } else {
                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        }

        return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
        media_remove_intf_links(&m2m_dev->intf_devnode->intf);
        media_devnode_remove(m2m_dev->intf_devnode);

        media_entity_remove_links(m2m_dev->source);
        media_entity_remove_links(&m2m_dev->sink);
        media_entity_remove_links(&m2m_dev->proc);
        media_device_unregister_entity(m2m_dev->source);
        media_device_unregister_entity(&m2m_dev->sink);
        media_device_unregister_entity(&m2m_dev->proc);
        kfree(m2m_dev->source->name);
        kfree(m2m_dev->sink.name);
        kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);

static int v4l2_m2m_register_entity(struct media_device *mdev,
        struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
        struct video_device *vdev, int function)
{
        struct media_entity *entity;
        struct media_pad *pads;
        char *name;
        unsigned int len;
        int num_pads;
        int ret;

        switch (type) {
        case MEM2MEM_ENT_TYPE_SOURCE:
                entity = m2m_dev->source;
                pads = &m2m_dev->source_pad;
                pads[0].flags = MEDIA_PAD_FL_SOURCE;
                num_pads = 1;
                break;
        case MEM2MEM_ENT_TYPE_SINK:
                entity = &m2m_dev->sink;
                pads = &m2m_dev->sink_pad;
                pads[0].flags = MEDIA_PAD_FL_SINK;
                num_pads = 1;
                break;
        case MEM2MEM_ENT_TYPE_PROC:
                entity = &m2m_dev->proc;
                pads = m2m_dev->proc_pads;
                pads[0].flags = MEDIA_PAD_FL_SINK;
                pads[1].flags = MEDIA_PAD_FL_SOURCE;
                num_pads = 2;
                break;
        default:
                return -EINVAL;
        }

        entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
        if (type != MEM2MEM_ENT_TYPE_PROC) {
                entity->info.dev.major = VIDEO_MAJOR;
                entity->info.dev.minor = vdev->minor;
        }
        len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
        name = kmalloc(len, GFP_KERNEL);
        if (!name)
                return -ENOMEM;
        snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
        entity->name = name;
        entity->function = function;

        ret = media_entity_pads_init(entity, num_pads, pads);
        if (ret) {
                /* Don't leak the entity name on failure */
                kfree(name);
                return ret;
        }
        ret = media_device_register_entity(mdev, entity);
        if (ret) {
                kfree(name);
                return ret;
        }

        return 0;
}

int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
                struct video_device *vdev, int function)
{
        struct media_device *mdev = vdev->v4l2_dev->mdev;
        struct media_link *link;
        int ret;

        if (!mdev)
                return 0;

        /* A memory-to-memory device consists of two DMA engine entities and
         * one video processing entity. The DMA engine entities are linked to
         * a V4L interface.
         */

        /* Create the three entities with their pads */
        m2m_dev->source = &vdev->entity;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
        if (ret)
                return ret;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_PROC, vdev, function);
        if (ret)
                goto err_rel_entity0;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
        if (ret)
                goto err_rel_entity1;

        /* Connect the three entities */
        ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (ret)
                goto err_rel_entity2;

        ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (ret)
                goto err_rm_links0;

        /* Create video interface */
        m2m_dev->intf_devnode = media_devnode_create(mdev,
                        MEDIA_INTF_T_V4L_VIDEO, 0,
                        VIDEO_MAJOR, vdev->minor);
        if (!m2m_dev->intf_devnode) {
                ret = -ENOMEM;
                goto err_rm_links1;
        }

        /* Connect the two DMA engines to the interface */
        link = media_create_intf_link(m2m_dev->source,
                        &m2m_dev->intf_devnode->intf,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (!link) {
                ret = -ENOMEM;
                goto err_rm_devnode;
        }

        link = media_create_intf_link(&m2m_dev->sink,
                        &m2m_dev->intf_devnode->intf,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (!link) {
                ret = -ENOMEM;
                goto err_rm_intf_link;
        }
        return 0;

err_rm_intf_link:
        media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
        media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
        media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
        media_entity_remove_links(&m2m_dev->proc);
        media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
        media_device_unregister_entity(&m2m_dev->proc);
        kfree(m2m_dev->proc.name);
err_rel_entity1:
        media_device_unregister_entity(&m2m_dev->sink);
        kfree(m2m_dev->sink.name);
err_rel_entity0:
        media_device_unregister_entity(m2m_dev->source);
        kfree(m2m_dev->source->name);
        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
        struct v4l2_m2m_dev *m2m_dev;

        if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
                return ERR_PTR(-EINVAL);

        m2m_dev = kzalloc(sizeof(*m2m_dev), GFP_KERNEL);
        if (!m2m_dev)
                return ERR_PTR(-ENOMEM);

        m2m_dev->curr_ctx = NULL;
        m2m_dev->m2m_ops = m2m_ops;
        INIT_LIST_HEAD(&m2m_dev->job_queue);
        spin_lock_init(&m2m_dev->job_spinlock);
        INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);

        return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
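
/*
 * A minimal driver-side initialization sketch (hypothetical "foo" driver,
 * error handling elided). The callbacks are the driver's own; only
 * .device_run is mandatory, as checked in v4l2_m2m_init() above. Drivers
 * that expose a media controller would afterwards call
 * v4l2_m2m_register_media_controller() with a MEDIA_ENT_F_PROC_VIDEO_*
 * function.
 *
 *	static const struct v4l2_m2m_ops foo_m2m_ops = {
 *		.device_run	= foo_device_run,
 *		.job_ready	= foo_job_ready,
 *		.job_abort	= foo_job_abort,
 *	};
 *
 *	foo->m2m_dev = v4l2_m2m_init(&foo_m2m_ops);
 *	if (IS_ERR(foo->m2m_dev))
 *		return PTR_ERR(foo->m2m_dev);
 */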

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
        kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
        struct v4l2_m2m_ctx *m2m_ctx;
        struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
        int ret;

        m2m_ctx = kzalloc(sizeof(*m2m_ctx), GFP_KERNEL);
        if (!m2m_ctx)
                return ERR_PTR(-ENOMEM);

        m2m_ctx->priv = drv_priv;
        m2m_ctx->m2m_dev = m2m_dev;
        init_waitqueue_head(&m2m_ctx->finished);

        out_q_ctx = &m2m_ctx->out_q_ctx;
        cap_q_ctx = &m2m_ctx->cap_q_ctx;

        INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
        INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
        spin_lock_init(&out_q_ctx->rdy_spinlock);
        spin_lock_init(&cap_q_ctx->rdy_spinlock);

        INIT_LIST_HEAD(&m2m_ctx->queue);

        ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
        if (ret)
                goto err;
        /*
         * Both queues should use the same mutex to lock the m2m context.
         * This lock is used in some v4l2_m2m_* helpers.
         */
        if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
                ret = -EINVAL;
                goto err;
        }
        m2m_ctx->q_lock = out_q_ctx->q.lock;

        return m2m_ctx;
err:
        kfree(m2m_ctx);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
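
/*
 * Typical use from a driver's open() handler (hypothetical sketch, assuming
 * the driver embeds a struct v4l2_fh in its per-file context): the driver
 * supplies a queue_init callback that fills in both vb2 queues and points
 * them at one shared lock, which the WARN_ON in v4l2_m2m_ctx_init() above
 * enforces.
 *
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(foo->m2m_dev, ctx,
 *					    &foo_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx)) {
 *		ret = PTR_ERR(ctx->fh.m2m_ctx);
 *		goto err_free;
 *	}
 */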

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
        vb2_queue_release(&m2m_ctx->out_q_ctx.q);

        kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
                struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b = container_of(vbuf,
                                struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;

        q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
        if (!q_ctx)
                return;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_add_tail(&b->list, &q_ctx->rdy_queue);
        q_ctx->num_rdy++;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
                                struct vb2_v4l2_buffer *cap_vb,
                                bool copy_frame_flags)
{
        u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

        if (copy_frame_flags)
                mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
                        V4L2_BUF_FLAG_BFRAME;

        cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;

        if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
                cap_vb->timecode = out_vb->timecode;
        cap_vb->field = out_vb->field;
        cap_vb->flags &= ~mask;
        cap_vb->flags |= out_vb->flags & mask;
        cap_vb->vb2_buf.copied_timestamp = 1;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);
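
/*
 * Hypothetical .device_run() sketch (placeholder "foo" names): peek at the
 * next ready buffers, carry the output buffer's timestamp and flags over to
 * the capture buffer with v4l2_m2m_buf_copy_metadata(), then kick the
 * hardware.
 *
 *	static void foo_device_run(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *		v4l2_m2m_buf_copy_metadata(src, dst, true);
 *		foo_hw_start(ctx, src, dst);
 *	}
 */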

void v4l2_m2m_request_queue(struct media_request *req)
{
        struct media_request_object *obj, *obj_safe;
        struct v4l2_m2m_ctx *m2m_ctx = NULL;

        /*
         * Queue all objects. Note that buffer objects are at the end of the
         * objects list, after all other object types. Once buffer objects
         * are queued, the driver might delete them immediately (if the driver
         * processes the buffer at once), so we have to use
         * list_for_each_entry_safe() to handle the case where the object we
         * queue is deleted.
         */
        list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
                struct v4l2_m2m_ctx *m2m_ctx_obj;
                struct vb2_buffer *vb;

                if (!obj->ops->queue)
                        continue;

                if (vb2_request_object_is_buffer(obj)) {
                        /* Sanity checks */
                        vb = container_of(obj, struct vb2_buffer, req_obj);
                        WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
                        m2m_ctx_obj = container_of(vb->vb2_queue,
                                                   struct v4l2_m2m_ctx,
                                                   out_q_ctx.q);
                        WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
                        m2m_ctx = m2m_ctx_obj;
                }

                /*
                 * The buffer we queue here can in theory be immediately
                 * unbound, hence the use of list_for_each_entry_safe()
                 * above and why we call the queue op last.
                 */
                obj->ops->queue(obj);
        }

        WARN_ON(!m2m_ctx);

        if (m2m_ctx)
                v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
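
/*
 * Drivers supporting the Request API typically wire this helper into their
 * struct media_device_ops (sketch, assuming vb2_request_validate is used
 * for validation):
 *
 *	static const struct media_device_ops foo_media_ops = {
 *		.req_validate	= vb2_request_validate,
 *		.req_queue	= v4l2_m2m_request_queue,
 *	};
 */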

/* Videobuf2 ioctl helpers */

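/*
 * These helpers assume the driver stores its m2m context in the v4l2_fh
 * (fh->m2m_ctx), so most of a driver's v4l2_ioctl_ops can be wired directly
 * to them (hypothetical sketch):
 *
 *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
 *		...
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */
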
int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
                                struct v4l2_requestbuffers *rb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
                                struct v4l2_create_buffers *create)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
                               struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
                                struct v4l2_exportbuffer *eb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
                                   struct v4l2_encoder_cmd *ec)
{
        if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
                return -EINVAL;

        ec->flags = 0;
        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);

int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
                                   struct v4l2_decoder_cmd *dc)
{
        if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
                return -EINVAL;

        dc->flags = 0;

        if (dc->cmd == V4L2_DEC_CMD_STOP) {
                dc->stop.pts = 0;
        } else if (dc->cmd == V4L2_DEC_CMD_START) {
                dc->start.speed = 0;
                dc->start.format = V4L2_DEC_START_FMT_NONE;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for both the output and the capture buffer queues.
 */
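
/*
 * Correspondingly, a driver's file operations can be wired to the helpers
 * below (hypothetical sketch; open/release handlers remain driver-specific):
 *
 *	static const struct v4l2_file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */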

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
        struct v4l2_fh *fh = file->private_data;
        struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
        __poll_t ret;

        if (m2m_ctx->q_lock)
                mutex_lock(m2m_ctx->q_lock);

        ret = v4l2_m2m_poll(file, m2m_ctx, wait);

        if (m2m_ctx->q_lock)
                mutex_unlock(m2m_ctx->q_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);