/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"

static void oct_poll_req_completion(struct work_struct *work);

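/* Initialize the per-device response lists, their locks and pending-request
 * counters, and create the delayed-work queue used to poll for soft-command
 * completions.  Returns 0 on success or -ENOMEM if the workqueue cannot be
 * allocated.
 */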
int octeon_setup_response_list(struct octeon_device *oct)
{
        int i, ret = 0;
        struct cavium_wq *cwq;

        for (i = 0; i < MAX_RESPONSE_LISTS; i++) {
                INIT_LIST_HEAD(&oct->response_list[i].head);
                spin_lock_init(&oct->response_list[i].lock);
                atomic_set(&oct->response_list[i].pending_req_count, 0);
        }
        spin_lock_init(&oct->cmd_resp_wqlock);

        oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
        if (!oct->dma_comp_wq.wq) {
                dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
                return -ENOMEM;
        }

        cwq = &oct->dma_comp_wq;
        INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
        cwq->wk.ctxptr = oct;
        oct->cmd_resp_state = OCT_DRV_ONLINE;

        return ret;
}

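/* Stop the completion polling work and release its workqueue. */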
void octeon_delete_response_list(struct octeon_device *oct)
{
        cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
        destroy_workqueue(oct->dma_comp_wq.wq);
}

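/* Walk the ordered soft-command list and complete any request whose response
 * has been written back by the hardware or whose timeout has expired (when
 * @force_quit is set, pending requests are timed out immediately).  Each
 * completed request is unlinked from the list and its callback is invoked.
 * Returns 1 once the list is empty, 0 otherwise.
 */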
int lio_process_ordered_list(struct octeon_device *octeon_dev,
                             u32 force_quit)
{
        struct octeon_response_list *ordered_sc_list;
        struct octeon_soft_command *sc;
        int request_complete = 0;
        int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
        u32 status;
        u64 status64;

        ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];

        do {
                spin_lock_bh(&ordered_sc_list->lock);

                if (list_empty(&ordered_sc_list->head)) {
                        spin_unlock_bh(&ordered_sc_list->lock);
                        return 1;
                }

                sc = list_first_entry(&ordered_sc_list->head,
                                      struct octeon_soft_command, node);

                status = OCTEON_REQUEST_PENDING;

                /* Check whether the chip has finished DMA'ing its response
                 * to the location rptr points to.
                 */
                status64 = *sc->status_word;

                if (status64 != COMPLETION_WORD_INIT) {
                        /* This logic ensures that all 64b have been written.
                         * 1. check byte 0 for non-FF
                         * 2. if non-FF, then swap result from BE to host order
                         * 3. check byte 7 (swapped to 0) for non-FF
                         * 4. if non-FF, use the low 32-bit status code
                         * 5. if either byte 0 or byte 7 is FF, don't use status
                         */
                        if ((status64 & 0xff) != 0xff) {
                                octeon_swap_8B_data(&status64, 1);
                                if ((status64 & 0xff) != 0xff) {
                                        /* retrieve 16-bit firmware status */
                                        status = (u32)(status64 & 0xffffULL);
                                        if (status) {
                                                status =
                                                  FIRMWARE_STATUS_CODE(status);
                                        } else {
                                                /* i.e. no error */
                                                status = OCTEON_REQUEST_DONE;
                                        }
                                }
                        }
                } else if (force_quit || (sc->timeout &&
                        time_after(jiffies, (unsigned long)sc->timeout))) {
                        dev_err(&octeon_dev->pci_dev->dev, "%s: cmd failed, timeout (%ld, %ld)\n",
                                __func__, (long)jiffies, (long)sc->timeout);
                        status = OCTEON_REQUEST_TIMEOUT;
                }

                if (status != OCTEON_REQUEST_PENDING) {
                        /* we have received a response or we have timed out */
                        /* remove node from linked list */
                        list_del(&sc->node);
                        atomic_dec(&octeon_dev->response_list
                                          [OCTEON_ORDERED_SC_LIST].
                                          pending_req_count);
                        spin_unlock_bh(&ordered_sc_list->lock);

                        if (sc->callback)
                                sc->callback(octeon_dev, status,
                                             sc->callback_arg);

                        request_complete++;

                } else {
                        /* no response yet */
                        request_complete = 0;
                        spin_unlock_bh(&ordered_sc_list->lock);
                }

                /* If we have processed the maximum number of ordered requests
                 * allowed per invocation, quit and let the poll work pick up
                 * the remaining requests the next time it runs.  Without this
                 * upper limit, this function could consume the entire CPU.
                 */
                if (request_complete >= resp_to_process)
                        break;
        } while (request_complete);

        return 0;
}

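/* Delayed-work handler: process the ordered list and re-arm itself with a
 * 1 ms delay for as long as ordered requests remain pending.
 */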
static void oct_poll_req_completion(struct work_struct *work)
{
        /* The cast assumes the work_struct sits at offset 0 of
         * struct cavium_wk (embedded in its delayed_work member).
         */
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
        struct cavium_wq *cwq = &oct->dma_comp_wq;

        lio_process_ordered_list(oct, 0);

        /* Keep polling while ordered requests are still outstanding. */
        if (atomic_read(&oct->response_list
                        [OCTEON_ORDERED_SC_LIST].pending_req_count))
                queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1));
}