Linux-libre 5.4.49-gnu
[librecmc/linux-libre.git] / kernel / trace / blktrace.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
4  *
5  */
6 #include <linux/kernel.h>
7 #include <linux/blkdev.h>
8 #include <linux/blktrace_api.h>
9 #include <linux/percpu.h>
10 #include <linux/init.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/debugfs.h>
14 #include <linux/export.h>
15 #include <linux/time.h>
16 #include <linux/uaccess.h>
17 #include <linux/list.h>
18 #include <linux/blk-cgroup.h>
19
20 #include "../../block/blk.h"
21
22 #include <trace/events/block.h>
23
24 #include "trace_output.h"
25
26 #ifdef CONFIG_BLK_DEV_IO_TRACE
27
28 static unsigned int blktrace_seq __read_mostly = 1;
29
30 static struct trace_array *blk_tr;
31 static bool blk_tracer_enabled __read_mostly;
32
33 static LIST_HEAD(running_trace_list);
34 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
35
36 /* Select an alternative, minimalistic output than the original one */
37 #define TRACE_BLK_OPT_CLASSIC   0x1
38 #define TRACE_BLK_OPT_CGROUP    0x2
39 #define TRACE_BLK_OPT_CGNAME    0x4
40
41 static struct tracer_opt blk_tracer_opts[] = {
42         /* Default disable the minimalistic output */
43         { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
44 #ifdef CONFIG_BLK_CGROUP
45         { TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
46         { TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
47 #endif
48         { }
49 };
50
51 static struct tracer_flags blk_tracer_flags = {
52         .val  = 0,
53         .opts = blk_tracer_opts,
54 };
55
56 /* Global reference count of probes */
57 static DEFINE_MUTEX(blk_probe_mutex);
58 static int blk_probes_ref;
59
60 static void blk_register_tracepoints(void);
61 static void blk_unregister_tracepoints(void);
62
63 /*
64  * Send out a notify message.
65  */
66 static void trace_note(struct blk_trace *bt, pid_t pid, int action,
67                        const void *data, size_t len,
68                        union kernfs_node_id *cgid)
69 {
70         struct blk_io_trace *t;
71         struct ring_buffer_event *event = NULL;
72         struct ring_buffer *buffer = NULL;
73         int pc = 0;
74         int cpu = smp_processor_id();
75         bool blk_tracer = blk_tracer_enabled;
76         ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
77
78         if (blk_tracer) {
79                 buffer = blk_tr->trace_buffer.buffer;
80                 pc = preempt_count();
81                 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
82                                                   sizeof(*t) + len + cgid_len,
83                                                   0, pc);
84                 if (!event)
85                         return;
86                 t = ring_buffer_event_data(event);
87                 goto record_it;
88         }
89
90         if (!bt->rchan)
91                 return;
92
93         t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
94         if (t) {
95                 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
96                 t->time = ktime_to_ns(ktime_get());
97 record_it:
98                 t->device = bt->dev;
99                 t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
100                 t->pid = pid;
101                 t->cpu = cpu;
102                 t->pdu_len = len + cgid_len;
103                 if (cgid)
104                         memcpy((void *)t + sizeof(*t), cgid, cgid_len);
105                 memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
106
107                 if (blk_tracer)
108                         trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
109         }
110 }
111
112 /*
113  * Send out a notify for this process, if we haven't done so since a trace
114  * started
115  */
116 static void trace_note_tsk(struct task_struct *tsk)
117 {
118         unsigned long flags;
119         struct blk_trace *bt;
120
121         tsk->btrace_seq = blktrace_seq;
122         spin_lock_irqsave(&running_trace_lock, flags);
123         list_for_each_entry(bt, &running_trace_list, running_list) {
124                 trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
125                            sizeof(tsk->comm), NULL);
126         }
127         spin_unlock_irqrestore(&running_trace_lock, flags);
128 }
129
130 static void trace_note_time(struct blk_trace *bt)
131 {
132         struct timespec64 now;
133         unsigned long flags;
134         u32 words[2];
135
136         /* need to check user space to see if this breaks in y2038 or y2106 */
137         ktime_get_real_ts64(&now);
138         words[0] = (u32)now.tv_sec;
139         words[1] = now.tv_nsec;
140
141         local_irq_save(flags);
142         trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), NULL);
143         local_irq_restore(flags);
144 }
145
146 void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
147         const char *fmt, ...)
148 {
149         int n;
150         va_list args;
151         unsigned long flags;
152         char *buf;
153
154         if (unlikely(bt->trace_state != Blktrace_running &&
155                      !blk_tracer_enabled))
156                 return;
157
158         /*
159          * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
160          * message to the trace.
161          */
162         if (!(bt->act_mask & BLK_TC_NOTIFY))
163                 return;
164
165         local_irq_save(flags);
166         buf = this_cpu_ptr(bt->msg_data);
167         va_start(args, fmt);
168         n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
169         va_end(args);
170
171         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
172                 blkcg = NULL;
173 #ifdef CONFIG_BLK_CGROUP
174         trace_note(bt, 0, BLK_TN_MESSAGE, buf, n,
175                 blkcg ? cgroup_get_kernfs_id(blkcg->css.cgroup) : NULL);
176 #else
177         trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL);
178 #endif
179         local_irq_restore(flags);
180 }
181 EXPORT_SYMBOL_GPL(__trace_note_message);
182
183 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
184                          pid_t pid)
185 {
186         if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
187                 return 1;
188         if (sector && (sector < bt->start_lba || sector > bt->end_lba))
189                 return 1;
190         if (bt->pid && pid != bt->pid)
191                 return 1;
192
193         return 0;
194 }
195
196 /*
197  * Data direction bit lookup
198  */
199 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
200                                  BLK_TC_ACT(BLK_TC_WRITE) };
201
202 #define BLK_TC_RAHEAD           BLK_TC_AHEAD
203 #define BLK_TC_PREFLUSH         BLK_TC_FLUSH
204
205 /* The ilog2() calls fall out because they're constant */
206 #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
207           (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
208
209 /*
210  * The worker for the various blk_add_trace*() types. Fills out a
211  * blk_io_trace structure and places it in a per-cpu subbuffer.
212  */
213 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
214                      int op, int op_flags, u32 what, int error, int pdu_len,
215                      void *pdu_data, union kernfs_node_id *cgid)
216 {
217         struct task_struct *tsk = current;
218         struct ring_buffer_event *event = NULL;
219         struct ring_buffer *buffer = NULL;
220         struct blk_io_trace *t;
221         unsigned long flags = 0;
222         unsigned long *sequence;
223         pid_t pid;
224         int cpu, pc = 0;
225         bool blk_tracer = blk_tracer_enabled;
226         ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
227
228         if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
229                 return;
230
231         what |= ddir_act[op_is_write(op) ? WRITE : READ];
232         what |= MASK_TC_BIT(op_flags, SYNC);
233         what |= MASK_TC_BIT(op_flags, RAHEAD);
234         what |= MASK_TC_BIT(op_flags, META);
235         what |= MASK_TC_BIT(op_flags, PREFLUSH);
236         what |= MASK_TC_BIT(op_flags, FUA);
237         if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
238                 what |= BLK_TC_ACT(BLK_TC_DISCARD);
239         if (op == REQ_OP_FLUSH)
240                 what |= BLK_TC_ACT(BLK_TC_FLUSH);
241         if (cgid)
242                 what |= __BLK_TA_CGROUP;
243
244         pid = tsk->pid;
245         if (act_log_check(bt, what, sector, pid))
246                 return;
247         cpu = raw_smp_processor_id();
248
249         if (blk_tracer) {
250                 tracing_record_cmdline(current);
251
252                 buffer = blk_tr->trace_buffer.buffer;
253                 pc = preempt_count();
254                 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
255                                                   sizeof(*t) + pdu_len + cgid_len,
256                                                   0, pc);
257                 if (!event)
258                         return;
259                 t = ring_buffer_event_data(event);
260                 goto record_it;
261         }
262
263         if (unlikely(tsk->btrace_seq != blktrace_seq))
264                 trace_note_tsk(tsk);
265
266         /*
267          * A word about the locking here - we disable interrupts to reserve
268          * some space in the relay per-cpu buffer, to prevent an irq
269          * from coming in and stepping on our toes.
270          */
271         local_irq_save(flags);
272         t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
273         if (t) {
274                 sequence = per_cpu_ptr(bt->sequence, cpu);
275
276                 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
277                 t->sequence = ++(*sequence);
278                 t->time = ktime_to_ns(ktime_get());
279 record_it:
280                 /*
281                  * These two are not needed in ftrace as they are in the
282                  * generic trace_entry, filled by tracing_generic_entry_update,
283                  * but for the trace_event->bin() synthesizer benefit we do it
284                  * here too.
285                  */
286                 t->cpu = cpu;
287                 t->pid = pid;
288
289                 t->sector = sector;
290                 t->bytes = bytes;
291                 t->action = what;
292                 t->device = bt->dev;
293                 t->error = error;
294                 t->pdu_len = pdu_len + cgid_len;
295
296                 if (cgid_len)
297                         memcpy((void *)t + sizeof(*t), cgid, cgid_len);
298                 if (pdu_len)
299                         memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
300
301                 if (blk_tracer) {
302                         trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
303                         return;
304                 }
305         }
306
307         local_irq_restore(flags);
308 }
309
310 static void blk_trace_free(struct blk_trace *bt)
311 {
312         debugfs_remove(bt->msg_file);
313         debugfs_remove(bt->dropped_file);
314         relay_close(bt->rchan);
315         debugfs_remove(bt->dir);
316         free_percpu(bt->sequence);
317         free_percpu(bt->msg_data);
318         kfree(bt);
319 }
320
321 static void get_probe_ref(void)
322 {
323         mutex_lock(&blk_probe_mutex);
324         if (++blk_probes_ref == 1)
325                 blk_register_tracepoints();
326         mutex_unlock(&blk_probe_mutex);
327 }
328
329 static void put_probe_ref(void)
330 {
331         mutex_lock(&blk_probe_mutex);
332         if (!--blk_probes_ref)
333                 blk_unregister_tracepoints();
334         mutex_unlock(&blk_probe_mutex);
335 }
336
337 static void blk_trace_cleanup(struct blk_trace *bt)
338 {
339         synchronize_rcu();
340         blk_trace_free(bt);
341         put_probe_ref();
342 }
343
344 static int __blk_trace_remove(struct request_queue *q)
345 {
346         struct blk_trace *bt;
347
348         bt = xchg(&q->blk_trace, NULL);
349         if (!bt)
350                 return -EINVAL;
351
352         if (bt->trace_state != Blktrace_running)
353                 blk_trace_cleanup(bt);
354
355         return 0;
356 }
357
358 int blk_trace_remove(struct request_queue *q)
359 {
360         int ret;
361
362         mutex_lock(&q->blk_trace_mutex);
363         ret = __blk_trace_remove(q);
364         mutex_unlock(&q->blk_trace_mutex);
365
366         return ret;
367 }
368 EXPORT_SYMBOL_GPL(blk_trace_remove);
369
370 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
371                                 size_t count, loff_t *ppos)
372 {
373         struct blk_trace *bt = filp->private_data;
374         char buf[16];
375
376         snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
377
378         return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
379 }
380
381 static const struct file_operations blk_dropped_fops = {
382         .owner =        THIS_MODULE,
383         .open =         simple_open,
384         .read =         blk_dropped_read,
385         .llseek =       default_llseek,
386 };
387
388 static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
389                                 size_t count, loff_t *ppos)
390 {
391         char *msg;
392         struct blk_trace *bt;
393
394         if (count >= BLK_TN_MAX_MSG)
395                 return -EINVAL;
396
397         msg = memdup_user_nul(buffer, count);
398         if (IS_ERR(msg))
399                 return PTR_ERR(msg);
400
401         bt = filp->private_data;
402         __trace_note_message(bt, NULL, "%s", msg);
403         kfree(msg);
404
405         return count;
406 }
407
408 static const struct file_operations blk_msg_fops = {
409         .owner =        THIS_MODULE,
410         .open =         simple_open,
411         .write =        blk_msg_write,
412         .llseek =       noop_llseek,
413 };
414
415 /*
416  * Keep track of how many times we encountered a full subbuffer, to aid
417  * the user space app in telling how many lost events there were.
418  */
419 static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
420                                      void *prev_subbuf, size_t prev_padding)
421 {
422         struct blk_trace *bt;
423
424         if (!relay_buf_full(buf))
425                 return 1;
426
427         bt = buf->chan->private_data;
428         atomic_inc(&bt->dropped);
429         return 0;
430 }
431
432 static int blk_remove_buf_file_callback(struct dentry *dentry)
433 {
434         debugfs_remove(dentry);
435
436         return 0;
437 }
438
439 static struct dentry *blk_create_buf_file_callback(const char *filename,
440                                                    struct dentry *parent,
441                                                    umode_t mode,
442                                                    struct rchan_buf *buf,
443                                                    int *is_global)
444 {
445         return debugfs_create_file(filename, mode, parent, buf,
446                                         &relay_file_operations);
447 }
448
449 static struct rchan_callbacks blk_relay_callbacks = {
450         .subbuf_start           = blk_subbuf_start_callback,
451         .create_buf_file        = blk_create_buf_file_callback,
452         .remove_buf_file        = blk_remove_buf_file_callback,
453 };
454
455 static void blk_trace_setup_lba(struct blk_trace *bt,
456                                 struct block_device *bdev)
457 {
458         struct hd_struct *part = NULL;
459
460         if (bdev)
461                 part = bdev->bd_part;
462
463         if (part) {
464                 bt->start_lba = part->start_sect;
465                 bt->end_lba = part->start_sect + part->nr_sects;
466         } else {
467                 bt->start_lba = 0;
468                 bt->end_lba = -1ULL;
469         }
470 }
471
472 /*
473  * Setup everything required to start tracing
474  */
475 static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
476                               struct block_device *bdev,
477                               struct blk_user_trace_setup *buts)
478 {
479         struct blk_trace *bt = NULL;
480         struct dentry *dir = NULL;
481         int ret;
482
483         if (!buts->buf_size || !buts->buf_nr)
484                 return -EINVAL;
485
486         if (!blk_debugfs_root)
487                 return -ENOENT;
488
489         strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
490         buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
491
492         /*
493          * some device names have larger paths - convert the slashes
494          * to underscores for this to work as expected
495          */
496         strreplace(buts->name, '/', '_');
497
498         bt = kzalloc(sizeof(*bt), GFP_KERNEL);
499         if (!bt)
500                 return -ENOMEM;
501
502         ret = -ENOMEM;
503         bt->sequence = alloc_percpu(unsigned long);
504         if (!bt->sequence)
505                 goto err;
506
507         bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
508         if (!bt->msg_data)
509                 goto err;
510
511         ret = -ENOENT;
512
513         dir = debugfs_lookup(buts->name, blk_debugfs_root);
514         if (!dir)
515                 bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
516
517         bt->dev = dev;
518         atomic_set(&bt->dropped, 0);
519         INIT_LIST_HEAD(&bt->running_list);
520
521         ret = -EIO;
522         bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
523                                                &blk_dropped_fops);
524
525         bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
526
527         bt->rchan = relay_open("trace", dir, buts->buf_size,
528                                 buts->buf_nr, &blk_relay_callbacks, bt);
529         if (!bt->rchan)
530                 goto err;
531
532         bt->act_mask = buts->act_mask;
533         if (!bt->act_mask)
534                 bt->act_mask = (u16) -1;
535
536         blk_trace_setup_lba(bt, bdev);
537
538         /* overwrite with user settings */
539         if (buts->start_lba)
540                 bt->start_lba = buts->start_lba;
541         if (buts->end_lba)
542                 bt->end_lba = buts->end_lba;
543
544         bt->pid = buts->pid;
545         bt->trace_state = Blktrace_setup;
546
547         ret = -EBUSY;
548         if (cmpxchg(&q->blk_trace, NULL, bt))
549                 goto err;
550
551         get_probe_ref();
552
553         ret = 0;
554 err:
555         if (dir && !bt->dir)
556                 dput(dir);
557         if (ret)
558                 blk_trace_free(bt);
559         return ret;
560 }
561
562 static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
563                              struct block_device *bdev, char __user *arg)
564 {
565         struct blk_user_trace_setup buts;
566         int ret;
567
568         ret = copy_from_user(&buts, arg, sizeof(buts));
569         if (ret)
570                 return -EFAULT;
571
572         ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
573         if (ret)
574                 return ret;
575
576         if (copy_to_user(arg, &buts, sizeof(buts))) {
577                 __blk_trace_remove(q);
578                 return -EFAULT;
579         }
580         return 0;
581 }
582
583 int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
584                     struct block_device *bdev,
585                     char __user *arg)
586 {
587         int ret;
588
589         mutex_lock(&q->blk_trace_mutex);
590         ret = __blk_trace_setup(q, name, dev, bdev, arg);
591         mutex_unlock(&q->blk_trace_mutex);
592
593         return ret;
594 }
595 EXPORT_SYMBOL_GPL(blk_trace_setup);
596
597 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
598 static int compat_blk_trace_setup(struct request_queue *q, char *name,
599                                   dev_t dev, struct block_device *bdev,
600                                   char __user *arg)
601 {
602         struct blk_user_trace_setup buts;
603         struct compat_blk_user_trace_setup cbuts;
604         int ret;
605
606         if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
607                 return -EFAULT;
608
609         buts = (struct blk_user_trace_setup) {
610                 .act_mask = cbuts.act_mask,
611                 .buf_size = cbuts.buf_size,
612                 .buf_nr = cbuts.buf_nr,
613                 .start_lba = cbuts.start_lba,
614                 .end_lba = cbuts.end_lba,
615                 .pid = cbuts.pid,
616         };
617
618         ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
619         if (ret)
620                 return ret;
621
622         if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
623                 __blk_trace_remove(q);
624                 return -EFAULT;
625         }
626
627         return 0;
628 }
629 #endif
630
631 static int __blk_trace_startstop(struct request_queue *q, int start)
632 {
633         int ret;
634         struct blk_trace *bt;
635
636         bt = rcu_dereference_protected(q->blk_trace,
637                                        lockdep_is_held(&q->blk_trace_mutex));
638         if (bt == NULL)
639                 return -EINVAL;
640
641         /*
642          * For starting a trace, we can transition from a setup or stopped
643          * trace. For stopping a trace, the state must be running
644          */
645         ret = -EINVAL;
646         if (start) {
647                 if (bt->trace_state == Blktrace_setup ||
648                     bt->trace_state == Blktrace_stopped) {
649                         blktrace_seq++;
650                         smp_mb();
651                         bt->trace_state = Blktrace_running;
652                         spin_lock_irq(&running_trace_lock);
653                         list_add(&bt->running_list, &running_trace_list);
654                         spin_unlock_irq(&running_trace_lock);
655
656                         trace_note_time(bt);
657                         ret = 0;
658                 }
659         } else {
660                 if (bt->trace_state == Blktrace_running) {
661                         bt->trace_state = Blktrace_stopped;
662                         spin_lock_irq(&running_trace_lock);
663                         list_del_init(&bt->running_list);
664                         spin_unlock_irq(&running_trace_lock);
665                         relay_flush(bt->rchan);
666                         ret = 0;
667                 }
668         }
669
670         return ret;
671 }
672
673 int blk_trace_startstop(struct request_queue *q, int start)
674 {
675         int ret;
676
677         mutex_lock(&q->blk_trace_mutex);
678         ret = __blk_trace_startstop(q, start);
679         mutex_unlock(&q->blk_trace_mutex);
680
681         return ret;
682 }
683 EXPORT_SYMBOL_GPL(blk_trace_startstop);
684
685 /*
686  * When reading or writing the blktrace sysfs files, the references to the
687  * opened sysfs or device files should prevent the underlying block device
688  * from being removed. So no further delete protection is really needed.
689  */
690
691 /**
692  * blk_trace_ioctl: - handle the ioctls associated with tracing
693  * @bdev:       the block device
694  * @cmd:        the ioctl cmd
695  * @arg:        the argument data, if any
696  *
697  **/
698 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
699 {
700         struct request_queue *q;
701         int ret, start = 0;
702         char b[BDEVNAME_SIZE];
703
704         q = bdev_get_queue(bdev);
705         if (!q)
706                 return -ENXIO;
707
708         mutex_lock(&q->blk_trace_mutex);
709
710         switch (cmd) {
711         case BLKTRACESETUP:
712                 bdevname(bdev, b);
713                 ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
714                 break;
715 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
716         case BLKTRACESETUP32:
717                 bdevname(bdev, b);
718                 ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
719                 break;
720 #endif
721         case BLKTRACESTART:
722                 start = 1;
723                 /* fall through */
724         case BLKTRACESTOP:
725                 ret = __blk_trace_startstop(q, start);
726                 break;
727         case BLKTRACETEARDOWN:
728                 ret = __blk_trace_remove(q);
729                 break;
730         default:
731                 ret = -ENOTTY;
732                 break;
733         }
734
735         mutex_unlock(&q->blk_trace_mutex);
736         return ret;
737 }
738
739 /**
740  * blk_trace_shutdown: - stop and cleanup trace structures
741  * @q:    the request queue associated with the device
742  *
743  **/
744 void blk_trace_shutdown(struct request_queue *q)
745 {
746         mutex_lock(&q->blk_trace_mutex);
747         if (rcu_dereference_protected(q->blk_trace,
748                                       lockdep_is_held(&q->blk_trace_mutex))) {
749                 __blk_trace_startstop(q, 0);
750                 __blk_trace_remove(q);
751         }
752
753         mutex_unlock(&q->blk_trace_mutex);
754 }
755
756 #ifdef CONFIG_BLK_CGROUP
757 static union kernfs_node_id *
758 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
759 {
760         struct blk_trace *bt;
761
762         /* We don't use the 'bt' value here except as an optimization... */
763         bt = rcu_dereference_protected(q->blk_trace, 1);
764         if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
765                 return NULL;
766
767         if (!bio->bi_blkg)
768                 return NULL;
769         return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup);
770 }
771 #else
772 static union kernfs_node_id *
773 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
774 {
775         return NULL;
776 }
777 #endif
778
779 static union kernfs_node_id *
780 blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
781 {
782         if (!rq->bio)
783                 return NULL;
784         /* Use the first bio */
785         return blk_trace_bio_get_cgid(q, rq->bio);
786 }
787
788 /*
789  * blktrace probes
790  */
791
792 /**
793  * blk_add_trace_rq - Add a trace for a request oriented action
794  * @rq:         the source request
795  * @error:      return status to log
796  * @nr_bytes:   number of completed bytes
797  * @what:       the action
798  * @cgid:       the cgroup info
799  *
800  * Description:
801  *     Records an action against a request. Will log the bio offset + size.
802  *
803  **/
804 static void blk_add_trace_rq(struct request *rq, int error,
805                              unsigned int nr_bytes, u32 what,
806                              union kernfs_node_id *cgid)
807 {
808         struct blk_trace *bt;
809
810         rcu_read_lock();
811         bt = rcu_dereference(rq->q->blk_trace);
812         if (likely(!bt)) {
813                 rcu_read_unlock();
814                 return;
815         }
816
817         if (blk_rq_is_passthrough(rq))
818                 what |= BLK_TC_ACT(BLK_TC_PC);
819         else
820                 what |= BLK_TC_ACT(BLK_TC_FS);
821
822         __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
823                         rq->cmd_flags, what, error, 0, NULL, cgid);
824         rcu_read_unlock();
825 }
826
827 static void blk_add_trace_rq_insert(void *ignore,
828                                     struct request_queue *q, struct request *rq)
829 {
830         blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
831                          blk_trace_request_get_cgid(q, rq));
832 }
833
834 static void blk_add_trace_rq_issue(void *ignore,
835                                    struct request_queue *q, struct request *rq)
836 {
837         blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
838                          blk_trace_request_get_cgid(q, rq));
839 }
840
841 static void blk_add_trace_rq_requeue(void *ignore,
842                                      struct request_queue *q,
843                                      struct request *rq)
844 {
845         blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
846                          blk_trace_request_get_cgid(q, rq));
847 }
848
849 static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
850                         int error, unsigned int nr_bytes)
851 {
852         blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
853                          blk_trace_request_get_cgid(rq->q, rq));
854 }
855
856 /**
857  * blk_add_trace_bio - Add a trace for a bio oriented action
858  * @q:          queue the io is for
859  * @bio:        the source bio
860  * @what:       the action
861  * @error:      error, if any
862  *
863  * Description:
864  *     Records an action against a bio. Will log the bio offset + size.
865  *
866  **/
867 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
868                               u32 what, int error)
869 {
870         struct blk_trace *bt;
871
872         rcu_read_lock();
873         bt = rcu_dereference(q->blk_trace);
874         if (likely(!bt)) {
875                 rcu_read_unlock();
876                 return;
877         }
878
879         __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
880                         bio_op(bio), bio->bi_opf, what, error, 0, NULL,
881                         blk_trace_bio_get_cgid(q, bio));
882         rcu_read_unlock();
883 }
884
885 static void blk_add_trace_bio_bounce(void *ignore,
886                                      struct request_queue *q, struct bio *bio)
887 {
888         blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
889 }
890
891 static void blk_add_trace_bio_complete(void *ignore,
892                                        struct request_queue *q, struct bio *bio,
893                                        int error)
894 {
895         blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
896 }
897
898 static void blk_add_trace_bio_backmerge(void *ignore,
899                                         struct request_queue *q,
900                                         struct request *rq,
901                                         struct bio *bio)
902 {
903         blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
904 }
905
906 static void blk_add_trace_bio_frontmerge(void *ignore,
907                                          struct request_queue *q,
908                                          struct request *rq,
909                                          struct bio *bio)
910 {
911         blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
912 }
913
914 static void blk_add_trace_bio_queue(void *ignore,
915                                     struct request_queue *q, struct bio *bio)
916 {
917         blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
918 }
919
920 static void blk_add_trace_getrq(void *ignore,
921                                 struct request_queue *q,
922                                 struct bio *bio, int rw)
923 {
924         if (bio)
925                 blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
926         else {
927                 struct blk_trace *bt;
928
929                 rcu_read_lock();
930                 bt = rcu_dereference(q->blk_trace);
931                 if (bt)
932                         __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
933                                         NULL, NULL);
934                 rcu_read_unlock();
935         }
936 }
937
938
939 static void blk_add_trace_sleeprq(void *ignore,
940                                   struct request_queue *q,
941                                   struct bio *bio, int rw)
942 {
943         if (bio)
944                 blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
945         else {
946                 struct blk_trace *bt;
947
948                 rcu_read_lock();
949                 bt = rcu_dereference(q->blk_trace);
950                 if (bt)
951                         __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
952                                         0, 0, NULL, NULL);
953                 rcu_read_unlock();
954         }
955 }
956
957 static void blk_add_trace_plug(void *ignore, struct request_queue *q)
958 {
959         struct blk_trace *bt;
960
961         rcu_read_lock();
962         bt = rcu_dereference(q->blk_trace);
963         if (bt)
964                 __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
965         rcu_read_unlock();
966 }
967
968 static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
969                                     unsigned int depth, bool explicit)
970 {
971         struct blk_trace *bt;
972
973         rcu_read_lock();
974         bt = rcu_dereference(q->blk_trace);
975         if (bt) {
976                 __be64 rpdu = cpu_to_be64(depth);
977                 u32 what;
978
979                 if (explicit)
980                         what = BLK_TA_UNPLUG_IO;
981                 else
982                         what = BLK_TA_UNPLUG_TIMER;
983
984                 __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
985         }
986         rcu_read_unlock();
987 }
988
989 static void blk_add_trace_split(void *ignore,
990                                 struct request_queue *q, struct bio *bio,
991                                 unsigned int pdu)
992 {
993         struct blk_trace *bt;
994
995         rcu_read_lock();
996         bt = rcu_dereference(q->blk_trace);
997         if (bt) {
998                 __be64 rpdu = cpu_to_be64(pdu);
999
1000                 __blk_add_trace(bt, bio->bi_iter.bi_sector,
1001                                 bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
1002                                 BLK_TA_SPLIT,
1003                                 blk_status_to_errno(bio->bi_status),
1004                                 sizeof(rpdu), &rpdu,
1005                                 blk_trace_bio_get_cgid(q, bio));
1006         }
1007         rcu_read_unlock();
1008 }
1009
1010 /**
1011  * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
1012  * @ignore:     trace callback data parameter (not used)
1013  * @q:          queue the io is for
1014  * @bio:        the source bio
1015  * @dev:        target device
1016  * @from:       source sector
1017  *
1018  * Description:
1019  *     Device mapper or raid target sometimes need to split a bio because
1020  *     it spans a stripe (or similar). Add a trace for that action.
1021  *
1022  **/
1023 static void blk_add_trace_bio_remap(void *ignore,
1024                                     struct request_queue *q, struct bio *bio,
1025                                     dev_t dev, sector_t from)
1026 {
1027         struct blk_trace *bt;
1028         struct blk_io_trace_remap r;
1029
1030         rcu_read_lock();
1031         bt = rcu_dereference(q->blk_trace);
1032         if (likely(!bt)) {
1033                 rcu_read_unlock();
1034                 return;
1035         }
1036
1037         r.device_from = cpu_to_be32(dev);
1038         r.device_to   = cpu_to_be32(bio_dev(bio));
1039         r.sector_from = cpu_to_be64(from);
1040
1041         __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
1042                         bio_op(bio), bio->bi_opf, BLK_TA_REMAP,
1043                         blk_status_to_errno(bio->bi_status),
1044                         sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
1045         rcu_read_unlock();
1046 }
1047
1048 /**
1049  * blk_add_trace_rq_remap - Add a trace for a request-remap operation
1050  * @ignore:     trace callback data parameter (not used)
1051  * @q:          queue the io is for
1052  * @rq:         the source request
1053  * @dev:        target device
1054  * @from:       source sector
1055  *
1056  * Description:
1057  *     Device mapper remaps request to other devices.
1058  *     Add a trace for that action.
1059  *
1060  **/
1061 static void blk_add_trace_rq_remap(void *ignore,
1062                                    struct request_queue *q,
1063                                    struct request *rq, dev_t dev,
1064                                    sector_t from)
1065 {
1066         struct blk_trace *bt;
1067         struct blk_io_trace_remap r;
1068
1069         rcu_read_lock();
1070         bt = rcu_dereference(q->blk_trace);
1071         if (likely(!bt)) {
1072                 rcu_read_unlock();
1073                 return;
1074         }
1075
1076         r.device_from = cpu_to_be32(dev);
1077         r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
1078         r.sector_from = cpu_to_be64(from);
1079
1080         __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
1081                         rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
1082                         sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
1083         rcu_read_unlock();
1084 }
1085
1086 /**
1087  * blk_add_driver_data - Add binary message with driver-specific data
1088  * @q:          queue the io is for
1089  * @rq:         io request
1090  * @data:       driver-specific data
1091  * @len:        length of driver-specific data
1092  *
1093  * Description:
1094  *     Some drivers might want to write driver-specific data per request.
1095  *
1096  **/
1097 void blk_add_driver_data(struct request_queue *q,
1098                          struct request *rq,
1099                          void *data, size_t len)
1100 {
1101         struct blk_trace *bt;
1102
1103         rcu_read_lock();
1104         bt = rcu_dereference(q->blk_trace);
1105         if (likely(!bt)) {
1106                 rcu_read_unlock();
1107                 return;
1108         }
1109
1110         __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
1111                                 BLK_TA_DRV_DATA, 0, len, data,
1112                                 blk_trace_request_get_cgid(q, rq));
1113         rcu_read_unlock();
1114 }
1115 EXPORT_SYMBOL_GPL(blk_add_driver_data);
1116
1117 static void blk_register_tracepoints(void)
1118 {
1119         int ret;
1120
1121         ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1122         WARN_ON(ret);
1123         ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1124         WARN_ON(ret);
1125         ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1126         WARN_ON(ret);
1127         ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1128         WARN_ON(ret);
1129         ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1130         WARN_ON(ret);
1131         ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1132         WARN_ON(ret);
1133         ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1134         WARN_ON(ret);
1135         ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1136         WARN_ON(ret);
1137         ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1138         WARN_ON(ret);
1139         ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
1140         WARN_ON(ret);
1141         ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1142         WARN_ON(ret);
1143         ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1144         WARN_ON(ret);
1145         ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
1146         WARN_ON(ret);
1147         ret = register_trace_block_split(blk_add_trace_split, NULL);
1148         WARN_ON(ret);
1149         ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1150         WARN_ON(ret);
1151         ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1152         WARN_ON(ret);
1153 }
1154
1155 static void blk_unregister_tracepoints(void)
1156 {
1157         unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1158         unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1159         unregister_trace_block_split(blk_add_trace_split, NULL);
1160         unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
1161         unregister_trace_block_plug(blk_add_trace_plug, NULL);
1162         unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1163         unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
1164         unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1165         unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1166         unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1167         unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1168         unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1169         unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1170         unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1171         unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1172         unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1173
1174         tracepoint_synchronize_unregister();
1175 }
1176
1177 /*
1178  * struct blk_io_tracer formatting routines
1179  */
1180
1181 static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1182 {
1183         int i = 0;
1184         int tc = t->action >> BLK_TC_SHIFT;
1185
1186         if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1187                 rwbs[i++] = 'N';
1188                 goto out;
1189         }
1190
1191         if (tc & BLK_TC_FLUSH)
1192                 rwbs[i++] = 'F';
1193
1194         if (tc & BLK_TC_DISCARD)
1195                 rwbs[i++] = 'D';
1196         else if (tc & BLK_TC_WRITE)
1197                 rwbs[i++] = 'W';
1198         else if (t->bytes)
1199                 rwbs[i++] = 'R';
1200         else
1201                 rwbs[i++] = 'N';
1202
1203         if (tc & BLK_TC_FUA)
1204                 rwbs[i++] = 'F';
1205         if (tc & BLK_TC_AHEAD)
1206                 rwbs[i++] = 'A';
1207         if (tc & BLK_TC_SYNC)
1208                 rwbs[i++] = 'S';
1209         if (tc & BLK_TC_META)
1210                 rwbs[i++] = 'M';
1211 out:
1212         rwbs[i] = '\0';
1213 }
1214
1215 static inline
1216 const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
1217 {
1218         return (const struct blk_io_trace *)ent;
1219 }
1220
1221 static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
1222 {
1223         return (void *)(te_blk_io_trace(ent) + 1) +
1224                 (has_cg ? sizeof(union kernfs_node_id) : 0);
1225 }
1226
1227 static inline const void *cgid_start(const struct trace_entry *ent)
1228 {
1229         return (void *)(te_blk_io_trace(ent) + 1);
1230 }
1231
1232 static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
1233 {
1234         return te_blk_io_trace(ent)->pdu_len -
1235                         (has_cg ? sizeof(union kernfs_node_id) : 0);
1236 }
1237
1238 static inline u32 t_action(const struct trace_entry *ent)
1239 {
1240         return te_blk_io_trace(ent)->action;
1241 }
1242
1243 static inline u32 t_bytes(const struct trace_entry *ent)
1244 {
1245         return te_blk_io_trace(ent)->bytes;
1246 }
1247
1248 static inline u32 t_sec(const struct trace_entry *ent)
1249 {
1250         return te_blk_io_trace(ent)->bytes >> 9;
1251 }
1252
1253 static inline unsigned long long t_sector(const struct trace_entry *ent)
1254 {
1255         return te_blk_io_trace(ent)->sector;
1256 }
1257
1258 static inline __u16 t_error(const struct trace_entry *ent)
1259 {
1260         return te_blk_io_trace(ent)->error;
1261 }
1262
1263 static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
1264 {
1265         const __be64 *val = pdu_start(ent, has_cg);
1266         return be64_to_cpu(*val);
1267 }
1268
1269 typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
1270         bool has_cg);
1271
1272 static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
1273         bool has_cg)
1274 {
1275         char rwbs[RWBS_LEN];
1276         unsigned long long ts  = iter->ts;
1277         unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1278         unsigned secs          = (unsigned long)ts;
1279         const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1280
1281         fill_rwbs(rwbs, t);
1282
1283         trace_seq_printf(&iter->seq,
1284                          "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1285                          MAJOR(t->device), MINOR(t->device), iter->cpu,
1286                          secs, nsec_rem, iter->ent->pid, act, rwbs);
1287 }
1288
1289 static void blk_log_action(struct trace_iterator *iter, const char *act,
1290         bool has_cg)
1291 {
1292         char rwbs[RWBS_LEN];
1293         const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1294
1295         fill_rwbs(rwbs, t);
1296         if (has_cg) {
1297                 const union kernfs_node_id *id = cgid_start(iter->ent);
1298
1299                 if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
1300                         char blkcg_name_buf[NAME_MAX + 1] = "<...>";
1301
1302                         cgroup_path_from_kernfs_id(id, blkcg_name_buf,
1303                                 sizeof(blkcg_name_buf));
1304                         trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
1305                                  MAJOR(t->device), MINOR(t->device),
1306                                  blkcg_name_buf, act, rwbs);
1307                 } else
1308                         trace_seq_printf(&iter->seq,
1309                                  "%3d,%-3d %x,%-x %2s %3s ",
1310                                  MAJOR(t->device), MINOR(t->device),
1311                                  id->ino, id->generation, act, rwbs);
1312         } else
1313                 trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1314                                  MAJOR(t->device), MINOR(t->device), act, rwbs);
1315 }
1316
1317 static void blk_log_dump_pdu(struct trace_seq *s,
1318         const struct trace_entry *ent, bool has_cg)
1319 {
1320         const unsigned char *pdu_buf;
1321         int pdu_len;
1322         int i, end;
1323
1324         pdu_buf = pdu_start(ent, has_cg);
1325         pdu_len = pdu_real_len(ent, has_cg);
1326
1327         if (!pdu_len)
1328                 return;
1329
1330         /* find the last zero that needs to be printed */
1331         for (end = pdu_len - 1; end >= 0; end--)
1332                 if (pdu_buf[end])
1333                         break;
1334         end++;
1335
1336         trace_seq_putc(s, '(');
1337
1338         for (i = 0; i < pdu_len; i++) {
1339
1340                 trace_seq_printf(s, "%s%02x",
1341                                  i == 0 ? "" : " ", pdu_buf[i]);
1342
1343                 /*
1344                  * stop when the rest is just zeroes and indicate so
1345                  * with a ".." appended
1346                  */
1347                 if (i == end && end != pdu_len - 1) {
1348                         trace_seq_puts(s, " ..) ");
1349                         return;
1350                 }
1351         }
1352
1353         trace_seq_puts(s, ") ");
1354 }
1355
1356 static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1357 {
1358         char cmd[TASK_COMM_LEN];
1359
1360         trace_find_cmdline(ent->pid, cmd);
1361
1362         if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1363                 trace_seq_printf(s, "%u ", t_bytes(ent));
1364                 blk_log_dump_pdu(s, ent, has_cg);
1365                 trace_seq_printf(s, "[%s]\n", cmd);
1366         } else {
1367                 if (t_sec(ent))
1368                         trace_seq_printf(s, "%llu + %u [%s]\n",
1369                                                 t_sector(ent), t_sec(ent), cmd);
1370                 else
1371                         trace_seq_printf(s, "[%s]\n", cmd);
1372         }
1373 }
1374
1375 static void blk_log_with_error(struct trace_seq *s,
1376                               const struct trace_entry *ent, bool has_cg)
1377 {
1378         if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1379                 blk_log_dump_pdu(s, ent, has_cg);
1380                 trace_seq_printf(s, "[%d]\n", t_error(ent));
1381         } else {
1382                 if (t_sec(ent))
1383                         trace_seq_printf(s, "%llu + %u [%d]\n",
1384                                          t_sector(ent),
1385                                          t_sec(ent), t_error(ent));
1386                 else
1387                         trace_seq_printf(s, "%llu [%d]\n",
1388                                          t_sector(ent), t_error(ent));
1389         }
1390 }
1391
1392 static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1393 {
1394         const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
1395
1396         trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1397                          t_sector(ent), t_sec(ent),
1398                          MAJOR(be32_to_cpu(__r->device_from)),
1399                          MINOR(be32_to_cpu(__r->device_from)),
1400                          be64_to_cpu(__r->sector_from));
1401 }
1402
1403 static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1404 {
1405         char cmd[TASK_COMM_LEN];
1406
1407         trace_find_cmdline(ent->pid, cmd);
1408
1409         trace_seq_printf(s, "[%s]\n", cmd);
1410 }
1411
1412 static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1413 {
1414         char cmd[TASK_COMM_LEN];
1415
1416         trace_find_cmdline(ent->pid, cmd);
1417
1418         trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
1419 }
1420
1421 static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1422 {
1423         char cmd[TASK_COMM_LEN];
1424
1425         trace_find_cmdline(ent->pid, cmd);
1426
1427         trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1428                          get_pdu_int(ent, has_cg), cmd);
1429 }
1430
1431 static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
1432                         bool has_cg)
1433 {
1434
1435         trace_seq_putmem(s, pdu_start(ent, has_cg),
1436                 pdu_real_len(ent, has_cg));
1437         trace_seq_putc(s, '\n');
1438 }
1439
1440 /*
1441  * struct tracer operations
1442  */
1443
1444 static void blk_tracer_print_header(struct seq_file *m)
1445 {
1446         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1447                 return;
1448         seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
1449                     "#  |     |     |           |   |   |\n");
1450 }
1451
1452 static void blk_tracer_start(struct trace_array *tr)
1453 {
1454         blk_tracer_enabled = true;
1455 }
1456
1457 static int blk_tracer_init(struct trace_array *tr)
1458 {
1459         blk_tr = tr;
1460         blk_tracer_start(tr);
1461         return 0;
1462 }
1463
1464 static void blk_tracer_stop(struct trace_array *tr)
1465 {
1466         blk_tracer_enabled = false;
1467 }
1468
1469 static void blk_tracer_reset(struct trace_array *tr)
1470 {
1471         blk_tracer_stop(tr);
1472 }
1473
1474 static const struct {
1475         const char *act[2];
1476         void       (*print)(struct trace_seq *s, const struct trace_entry *ent,
1477                             bool has_cg);
1478 } what2act[] = {
1479         [__BLK_TA_QUEUE]        = {{  "Q", "queue" },      blk_log_generic },
1480         [__BLK_TA_BACKMERGE]    = {{  "M", "backmerge" },  blk_log_generic },
1481         [__BLK_TA_FRONTMERGE]   = {{  "F", "frontmerge" }, blk_log_generic },
1482         [__BLK_TA_GETRQ]        = {{  "G", "getrq" },      blk_log_generic },
1483         [__BLK_TA_SLEEPRQ]      = {{  "S", "sleeprq" },    blk_log_generic },
1484         [__BLK_TA_REQUEUE]      = {{  "R", "requeue" },    blk_log_with_error },
1485         [__BLK_TA_ISSUE]        = {{  "D", "issue" },      blk_log_generic },
1486         [__BLK_TA_COMPLETE]     = {{  "C", "complete" },   blk_log_with_error },
1487         [__BLK_TA_PLUG]         = {{  "P", "plug" },       blk_log_plug },
1488         [__BLK_TA_UNPLUG_IO]    = {{  "U", "unplug_io" },  blk_log_unplug },
1489         [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
1490         [__BLK_TA_INSERT]       = {{  "I", "insert" },     blk_log_generic },
1491         [__BLK_TA_SPLIT]        = {{  "X", "split" },      blk_log_split },
1492         [__BLK_TA_BOUNCE]       = {{  "B", "bounce" },     blk_log_generic },
1493         [__BLK_TA_REMAP]        = {{  "A", "remap" },      blk_log_remap },
1494 };
1495
1496 static enum print_line_t print_one_line(struct trace_iterator *iter,
1497                                         bool classic)
1498 {
1499         struct trace_array *tr = iter->tr;
1500         struct trace_seq *s = &iter->seq;
1501         const struct blk_io_trace *t;
1502         u16 what;
1503         bool long_act;
1504         blk_log_action_t *log_action;
1505         bool has_cg;
1506
1507         t          = te_blk_io_trace(iter->ent);
1508         what       = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
1509         long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
1510         log_action = classic ? &blk_log_action_classic : &blk_log_action;
1511         has_cg     = t->action & __BLK_TA_CGROUP;
1512
1513         if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1514                 log_action(iter, long_act ? "message" : "m", has_cg);
1515                 blk_log_msg(s, iter->ent, has_cg);
1516                 return trace_handle_return(s);
1517         }
1518
1519         if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1520                 trace_seq_printf(s, "Unknown action %x\n", what);
1521         else {
1522                 log_action(iter, what2act[what].act[long_act], has_cg);
1523                 what2act[what].print(s, iter->ent, has_cg);
1524         }
1525
1526         return trace_handle_return(s);
1527 }
1528
1529 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1530                                                int flags, struct trace_event *event)
1531 {
1532         return print_one_line(iter, false);
1533 }
1534
1535 static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1536 {
1537         struct trace_seq *s = &iter->seq;
1538         struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1539         const int offset = offsetof(struct blk_io_trace, sector);
1540         struct blk_io_trace old = {
1541                 .magic    = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1542                 .time     = iter->ts,
1543         };
1544
1545         trace_seq_putmem(s, &old, offset);
1546         trace_seq_putmem(s, &t->sector,
1547                          sizeof(old) - offset + t->pdu_len);
1548 }
1549
1550 static enum print_line_t
1551 blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1552                              struct trace_event *event)
1553 {
1554         blk_trace_synthesize_old_trace(iter);
1555
1556         return trace_handle_return(&iter->seq);
1557 }
1558
1559 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1560 {
1561         if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1562                 return TRACE_TYPE_UNHANDLED;
1563
1564         return print_one_line(iter, true);
1565 }
1566
1567 static int
1568 blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1569 {
1570         /* don't output context-info for blk_classic output */
1571         if (bit == TRACE_BLK_OPT_CLASSIC) {
1572                 if (set)
1573                         tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1574                 else
1575                         tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
1576         }
1577         return 0;
1578 }
1579
1580 static struct tracer blk_tracer __read_mostly = {
1581         .name           = "blk",
1582         .init           = blk_tracer_init,
1583         .reset          = blk_tracer_reset,
1584         .start          = blk_tracer_start,
1585         .stop           = blk_tracer_stop,
1586         .print_header   = blk_tracer_print_header,
1587         .print_line     = blk_tracer_print_line,
1588         .flags          = &blk_tracer_flags,
1589         .set_flag       = blk_tracer_set_flag,
1590 };
1591
1592 static struct trace_event_functions trace_blk_event_funcs = {
1593         .trace          = blk_trace_event_print,
1594         .binary         = blk_trace_event_print_binary,
1595 };
1596
1597 static struct trace_event trace_blk_event = {
1598         .type           = TRACE_BLK,
1599         .funcs          = &trace_blk_event_funcs,
1600 };
1601
1602 static int __init init_blk_tracer(void)
1603 {
1604         if (!register_trace_event(&trace_blk_event)) {
1605                 pr_warn("Warning: could not register block events\n");
1606                 return 1;
1607         }
1608
1609         if (register_tracer(&blk_tracer) != 0) {
1610                 pr_warn("Warning: could not register the block tracer\n");
1611                 unregister_trace_event(&trace_blk_event);
1612                 return 1;
1613         }
1614
1615         return 0;
1616 }
1617
1618 device_initcall(init_blk_tracer);
1619
1620 static int blk_trace_remove_queue(struct request_queue *q)
1621 {
1622         struct blk_trace *bt;
1623
1624         bt = xchg(&q->blk_trace, NULL);
1625         if (bt == NULL)
1626                 return -EINVAL;
1627
1628         put_probe_ref();
1629         synchronize_rcu();
1630         blk_trace_free(bt);
1631         return 0;
1632 }
1633
1634 /*
1635  * Setup everything required to start tracing
1636  */
1637 static int blk_trace_setup_queue(struct request_queue *q,
1638                                  struct block_device *bdev)
1639 {
1640         struct blk_trace *bt = NULL;
1641         int ret = -ENOMEM;
1642
1643         bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1644         if (!bt)
1645                 return -ENOMEM;
1646
1647         bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1648         if (!bt->msg_data)
1649                 goto free_bt;
1650
1651         bt->dev = bdev->bd_dev;
1652         bt->act_mask = (u16)-1;
1653
1654         blk_trace_setup_lba(bt, bdev);
1655
1656         ret = -EBUSY;
1657         if (cmpxchg(&q->blk_trace, NULL, bt))
1658                 goto free_bt;
1659
1660         get_probe_ref();
1661         return 0;
1662
1663 free_bt:
1664         blk_trace_free(bt);
1665         return ret;
1666 }
1667
1668 /*
1669  * sysfs interface to enable and configure tracing
1670  */
1671
1672 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1673                                          struct device_attribute *attr,
1674                                          char *buf);
1675 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1676                                           struct device_attribute *attr,
1677                                           const char *buf, size_t count);
1678 #define BLK_TRACE_DEVICE_ATTR(_name) \
1679         DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1680                     sysfs_blk_trace_attr_show, \
1681                     sysfs_blk_trace_attr_store)
1682
1683 static BLK_TRACE_DEVICE_ATTR(enable);
1684 static BLK_TRACE_DEVICE_ATTR(act_mask);
1685 static BLK_TRACE_DEVICE_ATTR(pid);
1686 static BLK_TRACE_DEVICE_ATTR(start_lba);
1687 static BLK_TRACE_DEVICE_ATTR(end_lba);
1688
1689 static struct attribute *blk_trace_attrs[] = {
1690         &dev_attr_enable.attr,
1691         &dev_attr_act_mask.attr,
1692         &dev_attr_pid.attr,
1693         &dev_attr_start_lba.attr,
1694         &dev_attr_end_lba.attr,
1695         NULL
1696 };
1697
1698 struct attribute_group blk_trace_attr_group = {
1699         .name  = "trace",
1700         .attrs = blk_trace_attrs,
1701 };
1702
1703 static const struct {
1704         int mask;
1705         const char *str;
1706 } mask_maps[] = {
1707         { BLK_TC_READ,          "read"          },
1708         { BLK_TC_WRITE,         "write"         },
1709         { BLK_TC_FLUSH,         "flush"         },
1710         { BLK_TC_SYNC,          "sync"          },
1711         { BLK_TC_QUEUE,         "queue"         },
1712         { BLK_TC_REQUEUE,       "requeue"       },
1713         { BLK_TC_ISSUE,         "issue"         },
1714         { BLK_TC_COMPLETE,      "complete"      },
1715         { BLK_TC_FS,            "fs"            },
1716         { BLK_TC_PC,            "pc"            },
1717         { BLK_TC_NOTIFY,        "notify"        },
1718         { BLK_TC_AHEAD,         "ahead"         },
1719         { BLK_TC_META,          "meta"          },
1720         { BLK_TC_DISCARD,       "discard"       },
1721         { BLK_TC_DRV_DATA,      "drv_data"      },
1722         { BLK_TC_FUA,           "fua"           },
1723 };
1724
1725 static int blk_trace_str2mask(const char *str)
1726 {
1727         int i;
1728         int mask = 0;
1729         char *buf, *s, *token;
1730
1731         buf = kstrdup(str, GFP_KERNEL);
1732         if (buf == NULL)
1733                 return -ENOMEM;
1734         s = strstrip(buf);
1735
1736         while (1) {
1737                 token = strsep(&s, ",");
1738                 if (token == NULL)
1739                         break;
1740
1741                 if (*token == '\0')
1742                         continue;
1743
1744                 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1745                         if (strcasecmp(token, mask_maps[i].str) == 0) {
1746                                 mask |= mask_maps[i].mask;
1747                                 break;
1748                         }
1749                 }
1750                 if (i == ARRAY_SIZE(mask_maps)) {
1751                         mask = -EINVAL;
1752                         break;
1753                 }
1754         }
1755         kfree(buf);
1756
1757         return mask;
1758 }
1759
1760 static ssize_t blk_trace_mask2str(char *buf, int mask)
1761 {
1762         int i;
1763         char *p = buf;
1764
1765         for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1766                 if (mask & mask_maps[i].mask) {
1767                         p += sprintf(p, "%s%s",
1768                                     (p == buf) ? "" : ",", mask_maps[i].str);
1769                 }
1770         }
1771         *p++ = '\n';
1772
1773         return p - buf;
1774 }
1775
1776 static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
1777 {
1778         if (bdev->bd_disk == NULL)
1779                 return NULL;
1780
1781         return bdev_get_queue(bdev);
1782 }
1783
1784 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1785                                          struct device_attribute *attr,
1786                                          char *buf)
1787 {
1788         struct hd_struct *p = dev_to_part(dev);
1789         struct request_queue *q;
1790         struct block_device *bdev;
1791         struct blk_trace *bt;
1792         ssize_t ret = -ENXIO;
1793
1794         bdev = bdget(part_devt(p));
1795         if (bdev == NULL)
1796                 goto out;
1797
1798         q = blk_trace_get_queue(bdev);
1799         if (q == NULL)
1800                 goto out_bdput;
1801
1802         mutex_lock(&q->blk_trace_mutex);
1803
1804         bt = rcu_dereference_protected(q->blk_trace,
1805                                        lockdep_is_held(&q->blk_trace_mutex));
1806         if (attr == &dev_attr_enable) {
1807                 ret = sprintf(buf, "%u\n", !!bt);
1808                 goto out_unlock_bdev;
1809         }
1810
1811         if (bt == NULL)
1812                 ret = sprintf(buf, "disabled\n");
1813         else if (attr == &dev_attr_act_mask)
1814                 ret = blk_trace_mask2str(buf, bt->act_mask);
1815         else if (attr == &dev_attr_pid)
1816                 ret = sprintf(buf, "%u\n", bt->pid);
1817         else if (attr == &dev_attr_start_lba)
1818                 ret = sprintf(buf, "%llu\n", bt->start_lba);
1819         else if (attr == &dev_attr_end_lba)
1820                 ret = sprintf(buf, "%llu\n", bt->end_lba);
1821
1822 out_unlock_bdev:
1823         mutex_unlock(&q->blk_trace_mutex);
1824 out_bdput:
1825         bdput(bdev);
1826 out:
1827         return ret;
1828 }
1829
1830 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1831                                           struct device_attribute *attr,
1832                                           const char *buf, size_t count)
1833 {
1834         struct block_device *bdev;
1835         struct request_queue *q;
1836         struct hd_struct *p;
1837         struct blk_trace *bt;
1838         u64 value;
1839         ssize_t ret = -EINVAL;
1840
1841         if (count == 0)
1842                 goto out;
1843
1844         if (attr == &dev_attr_act_mask) {
1845                 if (kstrtoull(buf, 0, &value)) {
1846                         /* Assume it is a list of trace category names */
1847                         ret = blk_trace_str2mask(buf);
1848                         if (ret < 0)
1849                                 goto out;
1850                         value = ret;
1851                 }
1852         } else if (kstrtoull(buf, 0, &value))
1853                 goto out;
1854
1855         ret = -ENXIO;
1856
1857         p = dev_to_part(dev);
1858         bdev = bdget(part_devt(p));
1859         if (bdev == NULL)
1860                 goto out;
1861
1862         q = blk_trace_get_queue(bdev);
1863         if (q == NULL)
1864                 goto out_bdput;
1865
1866         mutex_lock(&q->blk_trace_mutex);
1867
1868         bt = rcu_dereference_protected(q->blk_trace,
1869                                        lockdep_is_held(&q->blk_trace_mutex));
1870         if (attr == &dev_attr_enable) {
1871                 if (!!value == !!bt) {
1872                         ret = 0;
1873                         goto out_unlock_bdev;
1874                 }
1875                 if (value)
1876                         ret = blk_trace_setup_queue(q, bdev);
1877                 else
1878                         ret = blk_trace_remove_queue(q);
1879                 goto out_unlock_bdev;
1880         }
1881
1882         ret = 0;
1883         if (bt == NULL) {
1884                 ret = blk_trace_setup_queue(q, bdev);
1885                 bt = rcu_dereference_protected(q->blk_trace,
1886                                 lockdep_is_held(&q->blk_trace_mutex));
1887         }
1888
1889         if (ret == 0) {
1890                 if (attr == &dev_attr_act_mask)
1891                         bt->act_mask = value;
1892                 else if (attr == &dev_attr_pid)
1893                         bt->pid = value;
1894                 else if (attr == &dev_attr_start_lba)
1895                         bt->start_lba = value;
1896                 else if (attr == &dev_attr_end_lba)
1897                         bt->end_lba = value;
1898         }
1899
1900 out_unlock_bdev:
1901         mutex_unlock(&q->blk_trace_mutex);
1902 out_bdput:
1903         bdput(bdev);
1904 out:
1905         return ret ? ret : count;
1906 }
1907
1908 int blk_trace_init_sysfs(struct device *dev)
1909 {
1910         return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1911 }
1912
1913 void blk_trace_remove_sysfs(struct device *dev)
1914 {
1915         sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1916 }
1917
1918 #endif /* CONFIG_BLK_DEV_IO_TRACE */
1919
1920 #ifdef CONFIG_EVENT_TRACING
1921
1922 void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
1923 {
1924         int i = 0;
1925
1926         if (op & REQ_PREFLUSH)
1927                 rwbs[i++] = 'F';
1928
1929         switch (op & REQ_OP_MASK) {
1930         case REQ_OP_WRITE:
1931         case REQ_OP_WRITE_SAME:
1932                 rwbs[i++] = 'W';
1933                 break;
1934         case REQ_OP_DISCARD:
1935                 rwbs[i++] = 'D';
1936                 break;
1937         case REQ_OP_SECURE_ERASE:
1938                 rwbs[i++] = 'D';
1939                 rwbs[i++] = 'E';
1940                 break;
1941         case REQ_OP_FLUSH:
1942                 rwbs[i++] = 'F';
1943                 break;
1944         case REQ_OP_READ:
1945                 rwbs[i++] = 'R';
1946                 break;
1947         default:
1948                 rwbs[i++] = 'N';
1949         }
1950
1951         if (op & REQ_FUA)
1952                 rwbs[i++] = 'F';
1953         if (op & REQ_RAHEAD)
1954                 rwbs[i++] = 'A';
1955         if (op & REQ_SYNC)
1956                 rwbs[i++] = 'S';
1957         if (op & REQ_META)
1958                 rwbs[i++] = 'M';
1959
1960         rwbs[i] = '\0';
1961 }
1962 EXPORT_SYMBOL_GPL(blk_fill_rwbs);
1963
1964 #endif /* CONFIG_EVENT_TRACING */
1965