/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *   Copyright(c) 2017 T-Platforms. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *   Copyright(c) 2017 T-Platforms. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Perf Linux driver
 */

/*
 * How to use this tool, by example.
 *
 * Assuming $DBG_DIR is something like:
 * '/sys/kernel/debug/ntb_perf/0000:00:03.0'
 * Suppose aside from local device there is at least one remote device
 * connected to NTB with index 0.
 *-----------------------------------------------------------------------------
 * Eg: install driver with specified chunk/total orders and dma-enabled flag
 *
 * root@self# insmod ntb_perf.ko chunk_order=19 total_order=28 use_dma
 *-----------------------------------------------------------------------------
 * Eg: check NTB ports (index) and MW mapping information
 *
 * root@self# cat $DBG_DIR/info
 *-----------------------------------------------------------------------------
 * Eg: start performance test with peer (index 0) and get the test metrics
 *
 * root@self# echo 0 > $DBG_DIR/run
 * root@self# cat $DBG_DIR/run
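 *-----------------------------------------------------------------------------
 * Eg: change the number of worker threads before a run (a sketch; 8 is just
 * a sample value, the "threads_count" node is created by this driver and
 * accepts values from 1 up to MAX_THREADS_CNT)
 *
 * root@self# echo 8 > $DBG_DIR/threads_count
 * root@self# cat $DBG_DIR/threads_count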
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/random.h>
#include <linux/ntb.h>

#define DRIVER_NAME             "ntb_perf"
#define DRIVER_VERSION          "2.0"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR("Dave Jiang <dave.jiang@intel.com>");
MODULE_DESCRIPTION("PCIe NTB Performance Measurement Tool");

#define MAX_THREADS_CNT         32
#define DEF_THREADS_CNT         1
#define MAX_CHUNK_SIZE          SZ_1M
#define MAX_CHUNK_ORDER         20 /* no larger than 1M */

#define DMA_TRIES               100
#define DMA_MDELAY              10

#define MSG_TRIES               500
#define MSG_UDELAY_LOW          1000
#define MSG_UDELAY_HIGH         2000

#define PERF_BUF_LEN 1024

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Upper limit of memory window size");

static unsigned char chunk_order = 19; /* 512K */
module_param(chunk_order, byte, 0644);
MODULE_PARM_DESC(chunk_order, "Data chunk order [2^n] to transfer");

static unsigned char total_order = 30; /* 1G */
module_param(total_order, byte, 0644);
MODULE_PARM_DESC(total_order, "Total data order [2^n] to transfer");
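
/*
 * With the default orders above, each test thread thus copies
 * 2^30 bytes (1 GiB) as 2^(30 - 19) = 2048 chunks of 512 KiB each.
 */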

static bool use_dma; /* default to 0 */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to measure performance");

/*==============================================================================
 *                         Perf driver data definition
 *==============================================================================
 */

enum perf_cmd {
	PERF_CMD_INVAL = -1, /* invalid spad command */
	PERF_CMD_SSIZE = 0,  /* send out buffer size */
	PERF_CMD_RSIZE = 1,  /* recv in  buffer size */
	PERF_CMD_SXLAT = 2,  /* send in  buffer xlat */
	PERF_CMD_RXLAT = 3,  /* recv out buffer xlat */
	PERF_CMD_CLEAR = 4,  /* clear allocated memory */
	PERF_STS_DONE  = 5,  /* init is done */
	PERF_STS_LNKUP = 6,  /* link up state flag */
};
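
/*
 * Note the enum values double as bit positions in perf_peer.sts, so the
 * PERF_CMD_* requests and the PERF_STS_* state flags share one bitfield.
 */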

struct perf_ctx;

struct perf_peer {
	struct perf_ctx *perf;
	int pidx;
	int gidx;

	/* Outbound MW params */
	u64 outbuf_xlat;
	resource_size_t outbuf_size;
	void __iomem *outbuf;

	/* Inbound MW params */
	dma_addr_t inbuf_xlat;
	resource_size_t inbuf_size;
	void		*inbuf;

	/* NTB connection setup service */
	struct work_struct	service;
	unsigned long		sts;
};
#define to_peer_service(__work) \
	container_of(__work, struct perf_peer, service)

struct perf_thread {
	struct perf_ctx *perf;
	int tidx;

	/* DMA-based test sync parameters */
	atomic_t dma_sync;
	wait_queue_head_t dma_wait;
	struct dma_chan *dma_chan;

	/* Data source and measured statistics */
	void *src;
	u64 copied;
	ktime_t duration;
	int status;
	struct work_struct work;
};
#define to_thread_work(__work) \
	container_of(__work, struct perf_thread, work)

struct perf_ctx {
	struct ntb_dev *ntb;

	/* Global device index and peers descriptors */
	int gidx;
	int pcnt;
	struct perf_peer *peers;

	/* Performance measuring work-threads interface */
	unsigned long busy_flag;
	wait_queue_head_t twait;
	atomic_t tsync;
	u8 tcnt;
	struct perf_peer *test_peer;
	struct perf_thread threads[MAX_THREADS_CNT];

	/* Scratchpad/Message IO operations */
	int (*cmd_send)(struct perf_peer *peer, enum perf_cmd cmd, u64 data);
	int (*cmd_recv)(struct perf_ctx *perf, int *pidx, enum perf_cmd *cmd,
			u64 *data);

	struct dentry *dbgfs_dir;
};

/*
 * Scratchpad-based commands interface
 */
#define PERF_SPAD_CNT(_pcnt) \
	(3*((_pcnt) + 1))
#define PERF_SPAD_CMD(_gidx) \
	(3*(_gidx))
#define PERF_SPAD_LDATA(_gidx) \
	(3*(_gidx) + 1)
#define PERF_SPAD_HDATA(_gidx) \
	(3*(_gidx) + 2)
#define PERF_SPAD_NOTIFY(_gidx) \
	(BIT_ULL(_gidx))

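/*
 * A worked example (not enforced anywhere in the code): a device with
 * global index 1 keeps its command triple in scratchpads 3..5, i.e.
 * PERF_SPAD_CMD(1) == 3, PERF_SPAD_LDATA(1) == 4, PERF_SPAD_HDATA(1) == 5,
 * and gets notified via doorbell bit BIT_ULL(1).
 */
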
/*
 * Message-based commands interface
 */
#define PERF_MSG_CNT            3
#define PERF_MSG_CMD            0
#define PERF_MSG_LDATA          1
#define PERF_MSG_HDATA          2

/*==============================================================================
 *                           Static data declarations
 *==============================================================================
 */

static struct dentry *perf_dbgfs_topdir;

static struct workqueue_struct *perf_wq __read_mostly;

/*==============================================================================
 *                  NTB cross-link commands execution service
 *==============================================================================
 */

static void perf_terminate_test(struct perf_ctx *perf);

static inline bool perf_link_is_up(struct perf_peer *peer)
{
	u64 link;

	link = ntb_link_is_up(peer->perf->ntb, NULL, NULL);
	return !!(link & BIT_ULL_MASK(peer->pidx));
}

static int perf_spad_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
			      u64 data)
{
	struct perf_ctx *perf = peer->perf;
	int try;
	u32 sts;

	dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data);

	/*
	 * Perform a predefined number of attempts before giving up.
	 * We are sending the data to the port-specific scratchpad, so as
	 * to prevent a multi-port access race condition. Additionally,
	 * there is no need for local locking, since only the thread-safe
	 * service work uses this method.
	 */
	for (try = 0; try < MSG_TRIES; try++) {
		if (!perf_link_is_up(peer))
			return -ENOLINK;

		sts = ntb_peer_spad_read(perf->ntb, peer->pidx,
					 PERF_SPAD_CMD(perf->gidx));
		if (sts != PERF_CMD_INVAL) {
			usleep_range(MSG_UDELAY_LOW, MSG_UDELAY_HIGH);
			continue;
		}

		ntb_peer_spad_write(perf->ntb, peer->pidx,
				    PERF_SPAD_LDATA(perf->gidx),
				    lower_32_bits(data));
		ntb_peer_spad_write(perf->ntb, peer->pidx,
				    PERF_SPAD_HDATA(perf->gidx),
				    upper_32_bits(data));
		mmiowb();
		ntb_peer_spad_write(perf->ntb, peer->pidx,
				    PERF_SPAD_CMD(perf->gidx),
				    cmd);
		mmiowb();
		ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx));

		dev_dbg(&perf->ntb->dev, "DB ring peer %#llx\n",
			PERF_SPAD_NOTIFY(peer->gidx));

		break;
	}

	return try < MSG_TRIES ? 0 : -EAGAIN;
}

static int perf_spad_cmd_recv(struct perf_ctx *perf, int *pidx,
			      enum perf_cmd *cmd, u64 *data)
{
	struct perf_peer *peer;
	u32 val;

	ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));

	/*
	 * We start scanning from the beginning, since the cleared DB may
	 * have been set by any peer. This gives peers with smaller indexes
	 * higher service priority, but it keeps the scratchpad and message
	 * code unified and simple.
	 */
	for (*pidx = 0; *pidx < perf->pcnt; (*pidx)++) {
		peer = &perf->peers[*pidx];

		if (!perf_link_is_up(peer))
			continue;

		val = ntb_spad_read(perf->ntb, PERF_SPAD_CMD(peer->gidx));
		if (val == PERF_CMD_INVAL)
			continue;

		*cmd = val;

		val = ntb_spad_read(perf->ntb, PERF_SPAD_LDATA(peer->gidx));
		*data = val;

		val = ntb_spad_read(perf->ntb, PERF_SPAD_HDATA(peer->gidx));
		*data |= (u64)val << 32;

		/* The next command can be retrieved from now on */
		ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx),
			       PERF_CMD_INVAL);

		dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data);

		return 0;
	}

	return -ENODATA;
}

static int perf_msg_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
			     u64 data)
{
	struct perf_ctx *perf = peer->perf;
	int try, ret;
	u64 outbits;

	dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data);

	/*
	 * Perform a predefined number of attempts before giving up. Message
	 * registers are free of race-condition problems when accessed
	 * from different ports, so we don't need to split the registers
	 * by global device index. We also don't need local locking,
	 * since the method is used from service work only.
	 */
	outbits = ntb_msg_outbits(perf->ntb);
	for (try = 0; try < MSG_TRIES; try++) {
		if (!perf_link_is_up(peer))
			return -ENOLINK;

		ret = ntb_msg_clear_sts(perf->ntb, outbits);
		if (ret)
			return ret;

		ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_LDATA,
				   lower_32_bits(data));

		if (ntb_msg_read_sts(perf->ntb) & outbits) {
			usleep_range(MSG_UDELAY_LOW, MSG_UDELAY_HIGH);
			continue;
		}

		ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA,
				   upper_32_bits(data));
		mmiowb();

		/* This call shall trigger peer message event */
		ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd);

		break;
	}

	return try < MSG_TRIES ? 0 : -EAGAIN;
}

static int perf_msg_cmd_recv(struct perf_ctx *perf, int *pidx,
			     enum perf_cmd *cmd, u64 *data)
{
	u64 inbits;
	u32 val;

	inbits = ntb_msg_inbits(perf->ntb);

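	/*
	 * All three inbound message registers (CMD, LDATA and HDATA) must
	 * have been received before the command can be parsed.
	 */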
	if (hweight64(ntb_msg_read_sts(perf->ntb) & inbits) < 3)
		return -ENODATA;

	val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_CMD);
	*cmd = val;

	val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_LDATA);
	*data = val;

	val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_HDATA);
	*data |= (u64)val << 32;

	/* The next command can be retrieved from now on */
	ntb_msg_clear_sts(perf->ntb, inbits);

	dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data);

	return 0;
}

static int perf_cmd_send(struct perf_peer *peer, enum perf_cmd cmd, u64 data)
{
	struct perf_ctx *perf = peer->perf;

	if (cmd == PERF_CMD_SSIZE || cmd == PERF_CMD_SXLAT)
		return perf->cmd_send(peer, cmd, data);

	dev_err(&perf->ntb->dev, "Send invalid command\n");
	return -EINVAL;
}

static int perf_cmd_exec(struct perf_peer *peer, enum perf_cmd cmd)
{
	switch (cmd) {
	case PERF_CMD_SSIZE:
	case PERF_CMD_RSIZE:
	case PERF_CMD_SXLAT:
	case PERF_CMD_RXLAT:
	case PERF_CMD_CLEAR:
		break;
	default:
		dev_err(&peer->perf->ntb->dev, "Exec invalid command\n");
		return -EINVAL;
	}

	/* No need for a memory barrier, since bit ops have internal locking */
	set_bit(cmd, &peer->sts);

	dev_dbg(&peer->perf->ntb->dev, "CMD exec: %d\n", cmd);

	(void)queue_work(system_highpri_wq, &peer->service);

	return 0;
}

static int perf_cmd_recv(struct perf_ctx *perf)
{
	struct perf_peer *peer;
	int ret, pidx, cmd;
	u64 data;

	while (!(ret = perf->cmd_recv(perf, &pidx, &cmd, &data))) {
		peer = &perf->peers[pidx];

		switch (cmd) {
		case PERF_CMD_SSIZE:
			peer->inbuf_size = data;
			return perf_cmd_exec(peer, PERF_CMD_RSIZE);
		case PERF_CMD_SXLAT:
			peer->outbuf_xlat = data;
			return perf_cmd_exec(peer, PERF_CMD_RXLAT);
		default:
			dev_err(&perf->ntb->dev, "Recv invalid command\n");
			return -EINVAL;
		}
	}

	/* Return 0 if no data left to process, otherwise an error */
	return ret == -ENODATA ? 0 : ret;
}

static void perf_link_event(void *ctx)
{
	struct perf_ctx *perf = ctx;
	struct perf_peer *peer;
	bool lnk_up;
	int pidx;

	for (pidx = 0; pidx < perf->pcnt; pidx++) {
		peer = &perf->peers[pidx];

		lnk_up = perf_link_is_up(peer);

		if (lnk_up &&
		    !test_and_set_bit(PERF_STS_LNKUP, &peer->sts)) {
			perf_cmd_exec(peer, PERF_CMD_SSIZE);
		} else if (!lnk_up &&
			   test_and_clear_bit(PERF_STS_LNKUP, &peer->sts)) {
			perf_cmd_exec(peer, PERF_CMD_CLEAR);
		}
	}
}

static void perf_db_event(void *ctx, int vec)
{
	struct perf_ctx *perf = ctx;

	dev_dbg(&perf->ntb->dev, "DB vec %d mask %#llx bits %#llx\n", vec,
		ntb_db_vector_mask(perf->ntb, vec), ntb_db_read(perf->ntb));

	/* Just receive all available commands */
	(void)perf_cmd_recv(perf);
}

static void perf_msg_event(void *ctx)
{
	struct perf_ctx *perf = ctx;

	dev_dbg(&perf->ntb->dev, "Msg status bits %#llx\n",
		ntb_msg_read_sts(perf->ntb));

	/* Messages are only sent one-by-one */
	(void)perf_cmd_recv(perf);
}

static const struct ntb_ctx_ops perf_ops = {
	.link_event = perf_link_event,
	.db_event = perf_db_event,
	.msg_event = perf_msg_event
};

static void perf_free_outbuf(struct perf_peer *peer)
{
	(void)ntb_peer_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
}

static int perf_setup_outbuf(struct perf_peer *peer)
{
	struct perf_ctx *perf = peer->perf;
	int ret;

	/* Outbuf size can be unaligned due to custom max_mw_size */
	ret = ntb_peer_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
				    peer->outbuf_xlat, peer->outbuf_size);
	if (ret) {
		dev_err(&perf->ntb->dev, "Failed to set outbuf translation\n");
		return ret;
	}

	/* Initialization is finally done */
	set_bit(PERF_STS_DONE, &peer->sts);

	return 0;
}

static void perf_free_inbuf(struct perf_peer *peer)
{
	if (!peer->inbuf)
		return;

	(void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
	dma_free_coherent(&peer->perf->ntb->dev, peer->inbuf_size,
			  peer->inbuf, peer->inbuf_xlat);
	peer->inbuf = NULL;
}

static int perf_setup_inbuf(struct perf_peer *peer)
{
	resource_size_t xlat_align, size_align, size_max;
	struct perf_ctx *perf = peer->perf;
	int ret;

	/* Get inbound MW parameters */
	ret = ntb_mw_get_align(perf->ntb, peer->pidx, perf->gidx,
			       &xlat_align, &size_align, &size_max);
	if (ret) {
		dev_err(&perf->ntb->dev, "Couldn't get inbuf restrictions\n");
		return ret;
	}

	if (peer->inbuf_size > size_max) {
		dev_err(&perf->ntb->dev, "Too big inbuf size %pa > %pa\n",
			&peer->inbuf_size, &size_max);
		return -EINVAL;
	}

	peer->inbuf_size = round_up(peer->inbuf_size, size_align);

	perf_free_inbuf(peer);

	peer->inbuf = dma_alloc_coherent(&perf->ntb->dev, peer->inbuf_size,
					 &peer->inbuf_xlat, GFP_KERNEL);
	if (!peer->inbuf) {
		dev_err(&perf->ntb->dev, "Failed to alloc inbuf of %pa\n",
			&peer->inbuf_size);
		return -ENOMEM;
	}
	if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
		dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n");
		goto err_free_inbuf;
	}

	ret = ntb_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
			       peer->inbuf_xlat, peer->inbuf_size);
	if (ret) {
		dev_err(&perf->ntb->dev, "Failed to set inbuf translation\n");
		goto err_free_inbuf;
	}

	/*
	 * We submit the inbuf xlat transfer command for execution here to
	 * keep the code architecture uniform, even though this method is
	 * called from the service work itself, so the command will be
	 * executed right after it returns.
	 */
	(void)perf_cmd_exec(peer, PERF_CMD_SXLAT);

	return 0;

err_free_inbuf:
	perf_free_inbuf(peer);

	return ret;
}

static void perf_service_work(struct work_struct *work)
{
	struct perf_peer *peer = to_peer_service(work);

	if (test_and_clear_bit(PERF_CMD_SSIZE, &peer->sts))
		perf_cmd_send(peer, PERF_CMD_SSIZE, peer->outbuf_size);

	if (test_and_clear_bit(PERF_CMD_RSIZE, &peer->sts))
		perf_setup_inbuf(peer);

	if (test_and_clear_bit(PERF_CMD_SXLAT, &peer->sts))
		perf_cmd_send(peer, PERF_CMD_SXLAT, peer->inbuf_xlat);

	if (test_and_clear_bit(PERF_CMD_RXLAT, &peer->sts))
		perf_setup_outbuf(peer);

	if (test_and_clear_bit(PERF_CMD_CLEAR, &peer->sts)) {
		clear_bit(PERF_STS_DONE, &peer->sts);
		if (test_bit(0, &peer->perf->busy_flag) &&
		    peer == peer->perf->test_peer) {
			dev_warn(&peer->perf->ntb->dev,
				"Freeing while test is in-flight\n");
			perf_terminate_test(peer->perf);
		}
		perf_free_outbuf(peer);
		perf_free_inbuf(peer);
	}
}

static int perf_init_service(struct perf_ctx *perf)
{
	u64 mask;

	if (ntb_peer_mw_count(perf->ntb) < perf->pcnt + 1) {
		dev_err(&perf->ntb->dev, "Not enough memory windows\n");
		return -EINVAL;
	}

	if (ntb_msg_count(perf->ntb) >= PERF_MSG_CNT) {
		perf->cmd_send = perf_msg_cmd_send;
		perf->cmd_recv = perf_msg_cmd_recv;

		dev_dbg(&perf->ntb->dev, "Message service initialized\n");

		return 0;
	}

	dev_dbg(&perf->ntb->dev, "Message service unsupported\n");

	mask = GENMASK_ULL(perf->pcnt, 0);
	if (ntb_spad_count(perf->ntb) >= PERF_SPAD_CNT(perf->pcnt) &&
	    (ntb_db_valid_mask(perf->ntb) & mask) == mask) {
		perf->cmd_send = perf_spad_cmd_send;
		perf->cmd_recv = perf_spad_cmd_recv;

		dev_dbg(&perf->ntb->dev, "Scratchpad service initialized\n");

		return 0;
	}

	dev_dbg(&perf->ntb->dev, "Scratchpad service unsupported\n");

	dev_err(&perf->ntb->dev, "Command services unsupported\n");

	return -EINVAL;
}

static int perf_enable_service(struct perf_ctx *perf)
{
	u64 mask, incmd_bit;
	int ret, sidx, scnt;

	mask = ntb_db_valid_mask(perf->ntb);
	(void)ntb_db_set_mask(perf->ntb, mask);
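
	/*
	 * All doorbells are masked here first; only the scratchpad
	 * command-notification bit (if the scratchpad service is used) gets
	 * unmasked below, after the driver context is registered.
	 */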

	ret = ntb_set_ctx(perf->ntb, perf, &perf_ops);
	if (ret)
		return ret;

	if (perf->cmd_send == perf_msg_cmd_send) {
		u64 inbits, outbits;

		inbits = ntb_msg_inbits(perf->ntb);
		outbits = ntb_msg_outbits(perf->ntb);
		(void)ntb_msg_set_mask(perf->ntb, inbits | outbits);

		incmd_bit = BIT_ULL(__ffs64(inbits));
		ret = ntb_msg_clear_mask(perf->ntb, incmd_bit);

		dev_dbg(&perf->ntb->dev, "MSG sts unmasked %#llx\n", incmd_bit);
	} else {
		scnt = ntb_spad_count(perf->ntb);
		for (sidx = 0; sidx < scnt; sidx++)
			ntb_spad_write(perf->ntb, sidx, PERF_CMD_INVAL);
		incmd_bit = PERF_SPAD_NOTIFY(perf->gidx);
		ret = ntb_db_clear_mask(perf->ntb, incmd_bit);

		dev_dbg(&perf->ntb->dev, "DB bits unmasked %#llx\n", incmd_bit);
	}
	if (ret) {
		ntb_clear_ctx(perf->ntb);
		return ret;
	}

	ntb_link_enable(perf->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	/* Might not be necessary */
	ntb_link_event(perf->ntb);

	return 0;
}

static void perf_disable_service(struct perf_ctx *perf)
{
	int pidx;

	ntb_link_disable(perf->ntb);

	if (perf->cmd_send == perf_msg_cmd_send) {
		u64 inbits;

		inbits = ntb_msg_inbits(perf->ntb);
		(void)ntb_msg_set_mask(perf->ntb, inbits);
	} else {
		(void)ntb_db_set_mask(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
	}

	ntb_clear_ctx(perf->ntb);

	for (pidx = 0; pidx < perf->pcnt; pidx++)
		perf_cmd_exec(&perf->peers[pidx], PERF_CMD_CLEAR);

	for (pidx = 0; pidx < perf->pcnt; pidx++)
		flush_work(&perf->peers[pidx].service);
}

/*==============================================================================
 *                      Performance measuring work-thread
 *==============================================================================
 */

static void perf_dma_copy_callback(void *data)
{
	struct perf_thread *pthr = data;

	atomic_dec(&pthr->dma_sync);
	wake_up(&pthr->dma_wait);
}

static int perf_copy_chunk(struct perf_thread *pthr,
			   void __iomem *dst, void *src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct device *dma_dev;
	int try = 0, ret = 0;

	if (!use_dma) {
		memcpy_toio(dst, src, len);
		goto ret_check_tsync;
	}

	dma_dev = pthr->dma_chan->device->dev;

	if (!is_dma_copy_aligned(pthr->dma_chan->device, offset_in_page(src),
				 offset_in_page(dst), len))
		return -EIO;

	unmap = dmaengine_get_unmap_data(dma_dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(dma_dev, virt_to_page(src),
		offset_in_page(src), len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, unmap->addr[0])) {
		ret = -EIO;
		goto err_free_resource;
	}
	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(dma_dev, virt_to_page(dst),
		offset_in_page(dst), len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, unmap->addr[1])) {
		ret = -EIO;
		goto err_free_resource;
	}
	unmap->from_cnt = 1;

	do {
		tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, unmap->addr[1],
			unmap->addr[0], len, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx)
			msleep(DMA_MDELAY);
	} while (!tx && (try++ < DMA_TRIES));

	if (!tx) {
		ret = -EIO;
		goto err_free_resource;
	}

	tx->callback = perf_dma_copy_callback;
	tx->callback_param = pthr;
	dma_set_unmap(tx, unmap);

	ret = dma_submit_error(dmaengine_submit(tx));
	if (ret) {
		dmaengine_unmap_put(unmap);
		goto err_free_resource;
	}

	dmaengine_unmap_put(unmap);

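	/*
	 * dma_sync counts the in-flight descriptors: it is incremented per
	 * submitted copy and decremented by perf_dma_copy_callback(), which
	 * perf_sync_test() later waits on to reach zero.
	 */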
	atomic_inc(&pthr->dma_sync);
	dma_async_issue_pending(pthr->dma_chan);

ret_check_tsync:
	return likely(atomic_read(&pthr->perf->tsync) > 0) ? 0 : -EINTR;

err_free_resource:
	dmaengine_unmap_put(unmap);

	return ret;
}

static bool perf_dma_filter(struct dma_chan *chan, void *data)
{
	struct perf_ctx *perf = data;
	int node;

	node = dev_to_node(&perf->ntb->dev);

	return node == NUMA_NO_NODE || node == dev_to_node(chan->device->dev);
}

static int perf_init_test(struct perf_thread *pthr)
{
	struct perf_ctx *perf = pthr->perf;
	dma_cap_mask_t dma_mask;

	pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL,
				 dev_to_node(&perf->ntb->dev));
	if (!pthr->src)
		return -ENOMEM;

	get_random_bytes(pthr->src, perf->test_peer->outbuf_size);

	if (!use_dma)
		return 0;

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_MEMCPY, dma_mask);
	pthr->dma_chan = dma_request_channel(dma_mask, perf_dma_filter, perf);
	if (!pthr->dma_chan) {
		dev_err(&perf->ntb->dev, "%d: Failed to get DMA channel\n",
			pthr->tidx);
		atomic_dec(&perf->tsync);
		wake_up(&perf->twait);
		kfree(pthr->src);
		return -ENODEV;
	}

	atomic_set(&pthr->dma_sync, 0);

	return 0;
}

static int perf_run_test(struct perf_thread *pthr)
{
	struct perf_peer *peer = pthr->perf->test_peer;
	struct perf_ctx *perf = pthr->perf;
	void __iomem *flt_dst, *bnd_dst;
	u64 total_size, chunk_size;
	void *flt_src;
	int ret = 0;

	total_size = 1ULL << total_order;
	chunk_size = 1ULL << chunk_order;
	chunk_size = min_t(u64, peer->outbuf_size, chunk_size);

	flt_src = pthr->src;
	bnd_dst = peer->outbuf + peer->outbuf_size;
	flt_dst = peer->outbuf;
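
	/*
	 * flt_src/flt_dst are floating cursors: they advance chunk by chunk
	 * and wrap back to the buffer start once the outbuf boundary is hit.
	 */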

	pthr->duration = ktime_get();

	/* The copied field is cleared at the test launch stage */
	while (pthr->copied < total_size) {
		ret = perf_copy_chunk(pthr, flt_dst, flt_src, chunk_size);
		if (ret) {
			dev_err(&perf->ntb->dev, "%d: Got error %d on test\n",
				pthr->tidx, ret);
			return ret;
		}

		pthr->copied += chunk_size;

		flt_dst += chunk_size;
		flt_src += chunk_size;
		if (flt_dst >= bnd_dst || flt_dst < peer->outbuf) {
			flt_dst = peer->outbuf;
			flt_src = pthr->src;
		}

		/* Yield the CPU to give other threads a chance to run */
		schedule();
	}

	return 0;
}

static int perf_sync_test(struct perf_thread *pthr)
{
	struct perf_ctx *perf = pthr->perf;

	if (!use_dma)
		goto no_dma_ret;

	wait_event(pthr->dma_wait,
		   (atomic_read(&pthr->dma_sync) == 0 ||
		    atomic_read(&perf->tsync) < 0));

	if (atomic_read(&perf->tsync) < 0)
		return -EINTR;

no_dma_ret:
	pthr->duration = ktime_sub(ktime_get(), pthr->duration);

	dev_dbg(&perf->ntb->dev, "%d: copied %llu bytes\n",
		pthr->tidx, pthr->copied);

	dev_dbg(&perf->ntb->dev, "%d: lasted %llu usecs\n",
		pthr->tidx, ktime_to_us(pthr->duration));

	dev_dbg(&perf->ntb->dev, "%d: %llu MBytes/s\n", pthr->tidx,
		div64_u64(pthr->copied, ktime_to_us(pthr->duration)));

	return 0;
}

static void perf_clear_test(struct perf_thread *pthr)
{
	struct perf_ctx *perf = pthr->perf;

	if (!use_dma)
		goto no_dma_notify;

	/*
	 * If the test finished without errors, termination isn't needed.
	 * We call it anyway just to be sure all transfers have completed.
	 */
	(void)dmaengine_terminate_sync(pthr->dma_chan);

	dma_release_channel(pthr->dma_chan);

no_dma_notify:
	atomic_dec(&perf->tsync);
	wake_up(&perf->twait);
	kfree(pthr->src);
}

static void perf_thread_work(struct work_struct *work)
{
	struct perf_thread *pthr = to_thread_work(work);
	int ret;

	/*
	 * Perform the stages in compliance with the use_dma flag value.
	 * The test status is changed only if an error happens; otherwise
	 * status -ENODATA is kept while the test is in flight. Results
	 * synchronization is performed only if the test finished
	 * without an error or interruption.
	 */
	ret = perf_init_test(pthr);
	if (ret) {
		pthr->status = ret;
		return;
	}

	ret = perf_run_test(pthr);
	if (ret) {
		pthr->status = ret;
		goto err_clear_test;
	}

	pthr->status = perf_sync_test(pthr);

err_clear_test:
	perf_clear_test(pthr);
}

static int perf_set_tcnt(struct perf_ctx *perf, u8 tcnt)
{
	if (tcnt == 0 || tcnt > MAX_THREADS_CNT)
		return -EINVAL;

	if (test_and_set_bit_lock(0, &perf->busy_flag))
		return -EBUSY;

	perf->tcnt = tcnt;

	clear_bit_unlock(0, &perf->busy_flag);

	return 0;
}

static void perf_terminate_test(struct perf_ctx *perf)
{
	int tidx;

	atomic_set(&perf->tsync, -1);
	wake_up(&perf->twait);

	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
		wake_up(&perf->threads[tidx].dma_wait);
		cancel_work_sync(&perf->threads[tidx].work);
	}
}

static int perf_submit_test(struct perf_peer *peer)
{
	struct perf_ctx *perf = peer->perf;
	struct perf_thread *pthr;
	int tidx, ret;

	if (!test_bit(PERF_STS_DONE, &peer->sts))
		return -ENOLINK;

	if (test_and_set_bit_lock(0, &perf->busy_flag))
		return -EBUSY;

	perf->test_peer = peer;
	atomic_set(&perf->tsync, perf->tcnt);
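
	/*
	 * tsync counts the threads still running: each worker decrements it
	 * on completion, while perf_terminate_test() forces it negative to
	 * interrupt the copy loops.
	 */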

	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
		pthr = &perf->threads[tidx];

		pthr->status = -ENODATA;
		pthr->copied = 0;
		pthr->duration = ktime_set(0, 0);
		if (tidx < perf->tcnt)
			(void)queue_work(perf_wq, &pthr->work);
	}

	ret = wait_event_interruptible(perf->twait,
				       atomic_read(&perf->tsync) <= 0);
	if (ret == -ERESTARTSYS) {
		perf_terminate_test(perf);
		ret = -EINTR;
	}

	clear_bit_unlock(0, &perf->busy_flag);

	return ret;
}

static int perf_read_stats(struct perf_ctx *perf, char *buf,
			   size_t size, ssize_t *pos)
{
	struct perf_thread *pthr;
	int tidx;

	if (test_and_set_bit_lock(0, &perf->busy_flag))
		return -EBUSY;

	(*pos) += scnprintf(buf + *pos, size - *pos,
		"    Peer %d test statistics:\n", perf->test_peer->pidx);

	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
		pthr = &perf->threads[tidx];

		if (pthr->status == -ENODATA)
			continue;

		if (pthr->status) {
			(*pos) += scnprintf(buf + *pos, size - *pos,
				"%d: error status %d\n", tidx, pthr->status);
			continue;
		}

		(*pos) += scnprintf(buf + *pos, size - *pos,
			"%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
			tidx, pthr->copied, ktime_to_us(pthr->duration),
			div64_u64(pthr->copied, ktime_to_us(pthr->duration)));
	}

	clear_bit_unlock(0, &perf->busy_flag);

	return 0;
}

static void perf_init_threads(struct perf_ctx *perf)
{
	struct perf_thread *pthr;
	int tidx;

	perf->tcnt = DEF_THREADS_CNT;
	perf->test_peer = &perf->peers[0];
	init_waitqueue_head(&perf->twait);

	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
		pthr = &perf->threads[tidx];

		pthr->perf = perf;
		pthr->tidx = tidx;
		pthr->status = -ENODATA;
		init_waitqueue_head(&pthr->dma_wait);
		INIT_WORK(&pthr->work, perf_thread_work);
	}
}

static void perf_clear_threads(struct perf_ctx *perf)
{
	perf_terminate_test(perf);
}

/*==============================================================================
 *                               DebugFS nodes
 *==============================================================================
 */

static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf,
				    size_t size, loff_t *offp)
{
	struct perf_ctx *perf = filep->private_data;
	struct perf_peer *peer;
	size_t buf_size;
	ssize_t pos = 0;
	int ret, pidx;
	char *buf;

	buf_size = min_t(size_t, size, 0x1000U);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, buf_size - pos,
		"    Performance measuring tool info:\n\n");

	pos += scnprintf(buf + pos, buf_size - pos,
		"Local port %d, Global index %d\n", ntb_port_number(perf->ntb),
		perf->gidx);
	pos += scnprintf(buf + pos, buf_size - pos, "Test status: ");
	if (test_bit(0, &perf->busy_flag)) {
		pos += scnprintf(buf + pos, buf_size - pos,
			"in-flight with port %d (%d)\n",
			ntb_peer_port_number(perf->ntb, perf->test_peer->pidx),
			perf->test_peer->pidx);
	} else {
		pos += scnprintf(buf + pos, buf_size - pos, "idle\n");
	}

	for (pidx = 0; pidx < perf->pcnt; pidx++) {
		peer = &perf->peers[pidx];

		pos += scnprintf(buf + pos, buf_size - pos,
			"Port %d (%d), Global index %d:\n",
			ntb_peer_port_number(perf->ntb, peer->pidx), peer->pidx,
			peer->gidx);

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tLink status: %s\n",
			test_bit(PERF_STS_LNKUP, &peer->sts) ? "up" : "down");

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tOut buffer addr 0x%pK\n", peer->outbuf);

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tOut buffer size %pa\n", &peer->outbuf_size);

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tOut buffer xlat 0x%016llx[p]\n", peer->outbuf_xlat);

		if (!peer->inbuf) {
			pos += scnprintf(buf + pos, buf_size - pos,
				"\tIn buffer addr: unallocated\n");
			continue;
		}

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tIn buffer addr 0x%pK\n", peer->inbuf);

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tIn buffer size %pa\n", &peer->inbuf_size);

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tIn buffer xlat %pad[p]\n", &peer->inbuf_xlat);
	}

	ret = simple_read_from_buffer(ubuf, size, offp, buf, pos);
	kfree(buf);

	return ret;
}

static const struct file_operations perf_dbgfs_info = {
	.open = simple_open,
	.read = perf_dbgfs_read_info
};

static ssize_t perf_dbgfs_read_run(struct file *filep, char __user *ubuf,
				   size_t size, loff_t *offp)
{
	struct perf_ctx *perf = filep->private_data;
	ssize_t ret, pos = 0;
	char *buf;

	buf = kmalloc(PERF_BUF_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = perf_read_stats(perf, buf, PERF_BUF_LEN, &pos);
	if (ret)
		goto err_free;

	ret = simple_read_from_buffer(ubuf, size, offp, buf, pos);
err_free:
	kfree(buf);

	return ret;
}

static ssize_t perf_dbgfs_write_run(struct file *filep, const char __user *ubuf,
				    size_t size, loff_t *offp)
{
	struct perf_ctx *perf = filep->private_data;
	struct perf_peer *peer;
	int pidx, ret;

	ret = kstrtoint_from_user(ubuf, size, 0, &pidx);
	if (ret)
		return ret;

	if (pidx < 0 || pidx >= perf->pcnt)
		return -EINVAL;

	peer = &perf->peers[pidx];

	ret = perf_submit_test(peer);
	if (ret)
		return ret;

	return size;
}

static const struct file_operations perf_dbgfs_run = {
	.open = simple_open,
	.read = perf_dbgfs_read_run,
	.write = perf_dbgfs_write_run
};

static ssize_t perf_dbgfs_read_tcnt(struct file *filep, char __user *ubuf,
				    size_t size, loff_t *offp)
{
	struct perf_ctx *perf = filep->private_data;
	char buf[8];
	ssize_t pos;

	pos = scnprintf(buf, sizeof(buf), "%hhu\n", perf->tcnt);

	return simple_read_from_buffer(ubuf, size, offp, buf, pos);
}

static ssize_t perf_dbgfs_write_tcnt(struct file *filep,
				     const char __user *ubuf,
				     size_t size, loff_t *offp)
{
	struct perf_ctx *perf = filep->private_data;
	int ret;
	u8 val;

	ret = kstrtou8_from_user(ubuf, size, 0, &val);
	if (ret)
		return ret;

	ret = perf_set_tcnt(perf, val);
	if (ret)
		return ret;

	return size;
}

static const struct file_operations perf_dbgfs_tcnt = {
	.open = simple_open,
	.read = perf_dbgfs_read_tcnt,
	.write = perf_dbgfs_write_tcnt
};

static void perf_setup_dbgfs(struct perf_ctx *perf)
{
	struct pci_dev *pdev = perf->ntb->pdev;

	perf->dbgfs_dir = debugfs_create_dir(pci_name(pdev), perf_dbgfs_topdir);
	if (!perf->dbgfs_dir) {
		dev_warn(&perf->ntb->dev, "DebugFS unsupported\n");
		return;
	}

	debugfs_create_file("info", 0600, perf->dbgfs_dir, perf,
			    &perf_dbgfs_info);

	debugfs_create_file("run", 0600, perf->dbgfs_dir, perf,
			    &perf_dbgfs_run);

	debugfs_create_file("threads_count", 0600, perf->dbgfs_dir, perf,
			    &perf_dbgfs_tcnt);

	/* These are made read-only for test execution safety and integrity */
	debugfs_create_u8("chunk_order", 0500, perf->dbgfs_dir, &chunk_order);

	debugfs_create_u8("total_order", 0500, perf->dbgfs_dir, &total_order);

	debugfs_create_bool("use_dma", 0500, perf->dbgfs_dir, &use_dma);
}

static void perf_clear_dbgfs(struct perf_ctx *perf)
{
	debugfs_remove_recursive(perf->dbgfs_dir);
}

/*==============================================================================
 *                        Basic driver initialization
 *==============================================================================
 */

static struct perf_ctx *perf_create_data(struct ntb_dev *ntb)
{
	struct perf_ctx *perf;

	perf = devm_kzalloc(&ntb->dev, sizeof(*perf), GFP_KERNEL);
	if (!perf)
		return ERR_PTR(-ENOMEM);

	perf->pcnt = ntb_peer_port_count(ntb);
	perf->peers = devm_kcalloc(&ntb->dev, perf->pcnt, sizeof(*perf->peers),
				  GFP_KERNEL);
	if (!perf->peers)
		return ERR_PTR(-ENOMEM);

	perf->ntb = ntb;

	return perf;
}

static int perf_setup_peer_mw(struct perf_peer *peer)
{
	struct perf_ctx *perf = peer->perf;
	phys_addr_t phys_addr;
	int ret;

	/* Get outbound MW parameters and map it */
	ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr,
				   &peer->outbuf_size);
	if (ret)
		return ret;

	peer->outbuf = devm_ioremap_wc(&perf->ntb->dev, phys_addr,
					peer->outbuf_size);
	if (!peer->outbuf)
		return -ENOMEM;

	if (max_mw_size && peer->outbuf_size > max_mw_size) {
		peer->outbuf_size = max_mw_size;
		dev_warn(&peer->perf->ntb->dev,
			"Peer %d outbuf reduced to %pa\n", peer->pidx,
			&peer->outbuf_size);
	}

	return 0;
}

static int perf_init_peers(struct perf_ctx *perf)
{
	struct perf_peer *peer;
	int pidx, lport, ret;

	lport = ntb_port_number(perf->ntb);
	perf->gidx = -1;
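	/*
	 * The global index (gidx) is essentially a device's port-sorted
	 * position: peers with a port number greater than the local one get
	 * their peer index shifted up by one, and the slot freed that way
	 * becomes the local device's own gidx.
	 */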
	for (pidx = 0; pidx < perf->pcnt; pidx++) {
		peer = &perf->peers[pidx];

		peer->perf = perf;
		peer->pidx = pidx;
		if (lport < ntb_peer_port_number(perf->ntb, pidx)) {
			if (perf->gidx == -1)
				perf->gidx = pidx;
			peer->gidx = pidx + 1;
		} else {
			peer->gidx = pidx;
		}
		INIT_WORK(&peer->service, perf_service_work);
	}
	if (perf->gidx == -1)
		perf->gidx = pidx;

	for (pidx = 0; pidx < perf->pcnt; pidx++) {
		ret = perf_setup_peer_mw(&perf->peers[pidx]);
		if (ret)
			return ret;
	}

	dev_dbg(&perf->ntb->dev, "Global port index %d\n", perf->gidx);

	return 0;
}

static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct perf_ctx *perf;
	int ret;

	perf = perf_create_data(ntb);
	if (IS_ERR(perf))
		return PTR_ERR(perf);

	ret = perf_init_peers(perf);
	if (ret)
		return ret;

	perf_init_threads(perf);

	ret = perf_init_service(perf);
	if (ret)
		return ret;

	ret = perf_enable_service(perf);
	if (ret)
		return ret;

	perf_setup_dbgfs(perf);

	return 0;
}

static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct perf_ctx *perf = ntb->ctx;

	perf_clear_dbgfs(perf);

	perf_disable_service(perf);

	perf_clear_threads(perf);
}

static struct ntb_client perf_client = {
	.ops = {
		.probe = perf_probe,
		.remove = perf_remove
	}
};

static int __init perf_init(void)
{
	int ret;

	if (chunk_order > MAX_CHUNK_ORDER) {
		chunk_order = MAX_CHUNK_ORDER;
		pr_info("Chunk order reduced to %hhu\n", chunk_order);
	}

	if (total_order < chunk_order) {
		total_order = chunk_order;
		pr_info("Total data order reduced to %hhu\n", total_order);
	}

	perf_wq = alloc_workqueue("perf_wq", WQ_UNBOUND | WQ_SYSFS, 0);
	if (!perf_wq)
		return -ENOMEM;

	if (debugfs_initialized())
		perf_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	ret = ntb_register_client(&perf_client);
	if (ret) {
		debugfs_remove_recursive(perf_dbgfs_topdir);
		destroy_workqueue(perf_wq);
	}

	return ret;
}
module_init(perf_init);

static void __exit perf_exit(void)
{
	ntb_unregister_client(&perf_client);
	debugfs_remove_recursive(perf_dbgfs_topdir);
	destroy_workqueue(perf_wq);
}
module_exit(perf_exit);