Linux-libre 2.6.32.58-gnu1
[librecmc/linux-libre.git] / drivers / staging / octeon / cvmx-cmd-queue.c
1 /***********************license start***************
2  * Author: Cavium Networks
3  *
4  * Contact: support@caviumnetworks.com
5  * This file is part of the OCTEON SDK
6  *
7  * Copyright (c) 2003-2008 Cavium Networks
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more
17  * details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this file; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22  * or visit http://www.gnu.org/licenses/.
23  *
24  * This file may also be available under a different license from Cavium.
25  * Contact Cavium Networks for more information
26  ***********************license end**************************************/
27
28 /*
29  * Support functions for managing command queues used for
30  * various hardware blocks.
31  */
32
33 #include <linux/kernel.h>
34
35 #include <asm/octeon/octeon.h>
36
37 #include "cvmx-config.h"
38 #include "cvmx-fpa.h"
39 #include "cvmx-cmd-queue.h"
40
41 #include <asm/octeon/cvmx-npei-defs.h>
42 #include <asm/octeon/cvmx-pexp-defs.h>
43 #include "cvmx-pko-defs.h"
44
45 /**
46  * This application uses this pointer to access the global queue
47  * state. It points to a bootmem named block.
48  */
49 __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
50
51 /**
52  * Initialize the Global queue state pointer.
53  *
54  * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
55  */
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
	/* Name of the bootmem named block shared by all users of the queues. */
	char *alloc_name = "cvmx_cmd_queues";
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
	extern uint64_t octeon_reserve32_memory;
#endif

	/* Fast path: state already set up by an earlier call. */
	if (likely(__cvmx_cmd_queue_state_ptr))
		return CVMX_CMD_QUEUE_SUCCESS;

#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
	/*
	 * If a reserved 32-bit region exists, carve the state block out
	 * of it (presumably so 32-bit addressing can reach it — TODO
	 * confirm against the SDK docs). Alignment is 128 bytes.
	 */
	if (octeon_reserve32_memory)
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
						   octeon_reserve32_memory,
						   octeon_reserve32_memory +
						   (CONFIG_CAVIUM_RESERVE32 <<
						    20) - 1, 128, alloc_name);
	else
#endif
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
					    128,
					    alloc_name);
	if (__cvmx_cmd_queue_state_ptr)
		memset(__cvmx_cmd_queue_state_ptr, 0,
		       sizeof(*__cvmx_cmd_queue_state_ptr));
	else {
		/*
		 * Allocation failed — most likely the named block was
		 * already created by another image/core, so look it up
		 * and attach to it instead of zeroing it.
		 */
		struct cvmx_bootmem_named_block_desc *block_desc =
		    cvmx_bootmem_find_named_block(alloc_name);
		if (block_desc)
			__cvmx_cmd_queue_state_ptr =
			    cvmx_phys_to_ptr(block_desc->base_addr);
		else {
			cvmx_dprintf
			    ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
			     alloc_name);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
	}
	return CVMX_CMD_QUEUE_SUCCESS;
}
98
99 /**
100  * Initialize a command queue for use. The initial FPA buffer is
101  * allocated and the hardware unit is configured to point to the
102  * new command queue.
103  *
104  * @queue_id:  Hardware command queue to initialize.
105  * @max_depth: Maximum outstanding commands that can be queued.
106  * @fpa_pool:  FPA pool the command queues should come from.
107  * @pool_size: Size of each buffer in the FPA pool (bytes)
108  *
109  * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
110  */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
						  int max_depth, int fpa_pool,
						  int pool_size)
{
	__cvmx_cmd_queue_state_t *qstate;
	/* Make sure the global bootmem-backed state block exists first. */
	cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
	if (result != CVMX_CMD_QUEUE_SUCCESS)
		return result;

	qstate = __cvmx_cmd_queue_get_state(queue_id);
	if (qstate == NULL)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/*
	 * We artificially limit max_depth to 1<<20 words. It is an
	 * arbitrary limit.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
		if ((max_depth < 0) || (max_depth > 1 << 20))
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	} else if (max_depth != 0)
		/* Depth tracking is compiled out; callers must pass 0. */
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/* Hardware FPA pool numbers are 0-7. */
	if ((fpa_pool < 0) || (fpa_pool > 7))
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	/*
	 * pool_size is stored below as (pool_size >> 3) - 1, i.e. in
	 * 8-byte words minus one, so it must be a sane buffer size.
	 */
	if ((pool_size < 128) || (pool_size > 65536))
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/* See if someone else has already initialized the queue */
	if (qstate->base_ptr_div128) {
		/*
		 * Already set up: succeed only if the caller's parameters
		 * match the existing configuration exactly.
		 */
		if (max_depth != (int)qstate->max_depth) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initalized with different "
				"max_depth (%d).\n",
			     (int)qstate->max_depth);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if (fpa_pool != qstate->fpa_pool) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initalized with different "
				"FPA pool (%u).\n",
			     qstate->fpa_pool);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initalized with different "
				"FPA pool size (%u).\n",
			     (qstate->pool_size_m1 + 1) << 3);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		/* Write-ordering barrier before reporting success. */
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_ALREADY_SETUP;
	} else {
		union cvmx_fpa_ctl_status status;
		void *buffer;

		/* The FPA unit must be running before we can pull buffers. */
		status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
		if (!status.s.enb) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "FPA is not enabled.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		/* Grab the queue's first command buffer from the pool. */
		buffer = cvmx_fpa_alloc(fpa_pool);
		if (buffer == NULL) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Unable to allocate initial buffer.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}

		memset(qstate, 0, sizeof(*qstate));
		qstate->max_depth = max_depth;
		qstate->fpa_pool = fpa_pool;
		qstate->pool_size_m1 = (pool_size >> 3) - 1;
		/* Buffer physical address stored divided by 128 (see <<7). */
		qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
		/*
		 * We zeroed the now serving field so we need to also
		 * zero the ticket.
		 */
		__cvmx_cmd_queue_state_ptr->
		    ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
		/* Make the new state visible before anyone writes commands. */
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_SUCCESS;
	}
}
196
197 /**
 * Shutdown a queue and free its command buffers to the FPA. The
199  * hardware connected to the queue must be stopped before this
200  * function is called.
201  *
202  * @queue_id: Queue to shutdown
203  *
204  * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
205  */
206 cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
207 {
208         __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
209         if (qptr == NULL) {
210                 cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
211                              "get queue information.\n");
212                 return CVMX_CMD_QUEUE_INVALID_PARAM;
213         }
214
215         if (cvmx_cmd_queue_length(queue_id) > 0) {
216                 cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
217                              "has data in it.\n");
218                 return CVMX_CMD_QUEUE_FULL;
219         }
220
221         __cvmx_cmd_queue_lock(queue_id, qptr);
222         if (qptr->base_ptr_div128) {
223                 cvmx_fpa_free(cvmx_phys_to_ptr
224                               ((uint64_t) qptr->base_ptr_div128 << 7),
225                               qptr->fpa_pool, 0);
226                 qptr->base_ptr_div128 = 0;
227         }
228         __cvmx_cmd_queue_unlock(qptr);
229
230         return CVMX_CMD_QUEUE_SUCCESS;
231 }
232
233 /**
234  * Return the number of command words pending in the queue. This
235  * function may be relatively slow for some hardware units.
236  *
237  * @queue_id: Hardware command queue to query
238  *
239  * Returns Number of outstanding commands
240  */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
	if (CVMX_ENABLE_PARAMETER_CHECKING) {
		if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	/*
	 * The cast is here so gcc will check that all values in the
	 * cvmx_cmd_queue_id_t enumeration are here (no default case:
	 * the return after the switch covers unknown ids instead).
	 */
	switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
	case CVMX_CMD_QUEUE_PKO_BASE:
		/*
		 * FIXME: Need atomic lock on
		 * CVMX_PKO_REG_READ_IDX. Right now we are normally
		 * called with the queue lock, so that is a SLIGHT
		 * amount of protection.
		 */
		/* Select the PKO queue, then read its doorbell count. */
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
			union cvmx_pko_mem_debug9 debug9;
			debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
			return debug9.cn38xx.doorbell;
		} else {
			union cvmx_pko_mem_debug8 debug8;
			debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
			return debug8.cn58xx.doorbell;
		}
	case CVMX_CMD_QUEUE_ZIP:
	case CVMX_CMD_QUEUE_DFA:
	case CVMX_CMD_QUEUE_RAID:
		/* FIXME: Implement other lengths */
		return 0;
	case CVMX_CMD_QUEUE_DMA_BASE:
		{
			/* Low 3 bits of the id select the DMA engine. */
			union cvmx_npei_dmax_counts dmax_counts;
			dmax_counts.u64 =
			    cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
					  (queue_id & 0x7));
			return dmax_counts.s.dbell;
		}
	case CVMX_CMD_QUEUE_END:
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}
	/* Unknown/unhandled queue id. */
	return CVMX_CMD_QUEUE_INVALID_PARAM;
}
288
289 /**
290  * Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routine access to the low level buffer
292  * for initial hardware setup. User applications should not call this
293  * function directly.
294  *
295  * @queue_id: Command queue to query
296  *
297  * Returns Command buffer or NULL on failure
298  */
299 void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
300 {
301         __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
302         if (qptr && qptr->base_ptr_div128)
303                 return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
304         else
305                 return NULL;
306 }