Linux-libre 4.7.10-gnu
[librecmc/linux-libre.git] / include / linux / qed / qed_chain.h
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8
9 #ifndef _QED_CHAIN_H
10 #define _QED_CHAIN_H
11
12 #include <linux/types.h>
13 #include <asm/byteorder.h>
14 #include <linux/kernel.h>
15 #include <linux/list.h>
16 #include <linux/slab.h>
17 #include <linux/qed/common_hsi.h>
18
/* dma_addr_t manip */
#define DMA_LO_LE(x)            cpu_to_le32(lower_32_bits(x))
#define DMA_HI_LE(x)            cpu_to_le32(upper_32_bits(x))
/* Store a dma_addr_t into a little-endian hi/lo regpair. */
#define DMA_REGPAIR_LE(x, val)  do { \
                                        (x).hi = DMA_HI_LE((val)); \
                                        (x).lo = DMA_LO_LE((val)); \
                                } while (0)

/* Combine hi/lo 32-bit halves into one value of the given type.
 * NOTE(review): the << 32 assumes the target type is 64 bits wide; with a
 * 32-bit dma_addr_t the shift would be undefined behavior - confirm this
 * driver only builds with a 64-bit dma_addr_t.
 */
#define HILO_GEN(hi, lo, type)  ((((type)(hi)) << 32) + (lo))
#define HILO_DMA(hi, lo)        HILO_GEN(hi, lo, dma_addr_t)
#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
#define HILO_DMA_REGPAIR(regpair)       (HILO_DMA(regpair.hi, regpair.lo))
#define HILO_64_REGPAIR(regpair)        (HILO_64(regpair.hi, regpair.lo))
32
/* Layout of the chain's pages in memory. */
enum qed_chain_mode {
        /* Each Page contains a next pointer at its end */
        QED_CHAIN_MODE_NEXT_PTR,

        /* Chain is a single page; a next pointer is not required */
        QED_CHAIN_MODE_SINGLE,

        /* Page pointers are located in a side list (PBL) */
        QED_CHAIN_MODE_PBL,
};
43
/* Intended usage of a chain; determines its initial fill state on reset. */
enum qed_chain_use_mode {
        QED_CHAIN_USE_TO_PRODUCE,               /* Chain starts empty */
        QED_CHAIN_USE_TO_CONSUME,               /* Chain starts full */
        QED_CHAIN_USE_TO_CONSUME_PRODUCE,       /* Chain starts empty */
};
49
/* Link element stored past the last usable element of each page in a
 * NEXT_PTR-mode chain; holds the DMA and CPU addresses of the next page.
 */
struct qed_chain_next {
        struct regpair  next_phys;
        void            *next_virt;
};
54
/* Side-table bookkeeping for QED_CHAIN_MODE_PBL chains. */
struct qed_chain_pbl {
        dma_addr_t      p_phys_table;   /* DMA address of the page table */
        void            *p_virt_table;  /* CPU address of the page table */
        u16             prod_page_idx;  /* page currently produced into */
        u16             cons_page_idx;  /* page currently consumed from */
};
61
/* Run-time state of a single chain; see qed_chain_init() for how the
 * derived fields are computed.
 */
struct qed_chain {
        void                    *p_virt_addr;   /* CPU address of first page */
        dma_addr_t              p_phys_addr;    /* DMA address of first page */
        void                    *p_prod_elem;   /* next element to produce */
        void                    *p_cons_elem;   /* next element to consume */
        u16                     page_cnt;
        enum qed_chain_mode     mode;
        enum qed_chain_use_mode intended_use; /* used to produce/consume */
        u16                     capacity; /*< number of _usable_ elements */
        u16                     size; /* number of elements, usable or not */
        u16                     prod_idx;
        u16                     cons_idx;
        u16                     elem_per_page;
        u16                     elem_per_page_mask;     /* elem_per_page - 1 */
        u16                     elem_unusable;  /* unusable elems per page */
        u16                     usable_per_page;
        u16                     elem_size;      /* bytes per element */
        u16                     next_page_mask;
        struct qed_chain_pbl    pbl;    /* meaningful in PBL mode only */
};
82
#define QED_CHAIN_PBL_ENTRY_SIZE        (8)
#define QED_CHAIN_PAGE_SIZE             (0x1000)
#define ELEMS_PER_PAGE(elem_size)       (QED_CHAIN_PAGE_SIZE / (elem_size))

/* Elements at the end of a page that cannot carry data: in NEXT_PTR mode,
 * enough elements to hold a struct qed_chain_next; zero otherwise.
 * 'mode' is parenthesized so any expression may safely be passed in.
 */
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)     \
        (((mode) == QED_CHAIN_MODE_NEXT_PTR) ?       \
         (1 + ((sizeof(struct qed_chain_next) - 1) / \
               (elem_size))) : 0)

/* Data-carrying elements per page. */
#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
        ((u32)(ELEMS_PER_PAGE(elem_size) -     \
               UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

/* Pages needed to hold elem_cnt usable elements. */
#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
        DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
98
99 /* Accessors */
100 static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
101 {
102         return p_chain->prod_idx;
103 }
104
105 static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
106 {
107         return p_chain->cons_idx;
108 }
109
/* Number of elements still available in the chain (capacity minus the
 * outstanding produced-but-not-consumed elements).
 */
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
        u16 used;

        /* The 0x10000 bias keeps the u32 subtraction non-negative across
         * u16 wrap-around; truncating the u32 result into u16 'used' then
         * yields the correct modulo-2^16 distance, so no explicit masking
         * is needed.
         */
        used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
                (u32)p_chain->cons_idx;
        if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
                /* Discount the per-page next-pointer elements the producer
                 * has stepped over but the consumer has not yet reached.
                 */
                used -= p_chain->prod_idx / p_chain->elem_per_page -
                        p_chain->cons_idx / p_chain->elem_per_page;

        return p_chain->capacity - used;
}
123
124 static inline u8 qed_chain_is_full(struct qed_chain *p_chain)
125 {
126         return qed_chain_get_elem_left(p_chain) == p_chain->capacity;
127 }
128
129 static inline u8 qed_chain_is_empty(struct qed_chain *p_chain)
130 {
131         return qed_chain_get_elem_left(p_chain) == 0;
132 }
133
134 static inline u16 qed_chain_get_elem_per_page(
135         struct qed_chain *p_chain)
136 {
137         return p_chain->elem_per_page;
138 }
139
140 static inline u16 qed_chain_get_usable_per_page(
141         struct qed_chain *p_chain)
142 {
143         return p_chain->usable_per_page;
144 }
145
146 static inline u16 qed_chain_get_unusable_per_page(
147         struct qed_chain *p_chain)
148 {
149         return p_chain->elem_unusable;
150 }
151
152 static inline u16 qed_chain_get_size(struct qed_chain *p_chain)
153 {
154         return p_chain->size;
155 }
156
157 static inline dma_addr_t
158 qed_chain_get_pbl_phys(struct qed_chain *p_chain)
159 {
160         return p_chain->pbl.p_phys_table;
161 }
162
163 /**
164  * @brief qed_chain_advance_page -
165  *
166  * Advance the next element accros pages for a linked chain
167  *
168  * @param p_chain
169  * @param p_next_elem
170  * @param idx_to_inc
171  * @param page_to_inc
172  */
173 static inline void
174 qed_chain_advance_page(struct qed_chain *p_chain,
175                        void **p_next_elem,
176                        u16 *idx_to_inc,
177                        u16 *page_to_inc)
178
179 {
180         switch (p_chain->mode) {
181         case QED_CHAIN_MODE_NEXT_PTR:
182         {
183                 struct qed_chain_next *p_next = *p_next_elem;
184                 *p_next_elem = p_next->next_virt;
185                 *idx_to_inc += p_chain->elem_unusable;
186                 break;
187         }
188         case QED_CHAIN_MODE_SINGLE:
189                 *p_next_elem = p_chain->p_virt_addr;
190                 break;
191
192         case QED_CHAIN_MODE_PBL:
193                 /* It is assumed pages are sequential, next element needs
194                  * to change only when passing going back to first from last.
195                  */
196                 if (++(*page_to_inc) == p_chain->page_cnt) {
197                         *page_to_inc = 0;
198                         *p_next_elem = p_chain->p_virt_addr;
199                 }
200         }
201 }
202
/* True when 'idx' sits on the first unusable element of its page. */
#define is_unusable_idx(p, idx) \
        (((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

/* True when 'idx + 1' would land on the first unusable element. */
#define is_unusable_next_idx(p, idx) \
        ((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)

/* If 'idx' has reached the unusable tail of its page, skip it forward
 * past the page's unusable (link) elements. 'idx' is a field name, not
 * a value, so the macro evaluates it through (p)-> each time.
 */
#define test_ans_skip(p, idx)                           \
        do {                                            \
                if (is_unusable_idx(p, idx)) {          \
                        (p)->idx += (p)->elem_unusable; \
                }                                       \
        } while (0)
215
216 /**
217  * @brief qed_chain_return_multi_produced -
218  *
219  * A chain in which the driver "Produces" elements should use this API
220  * to indicate previous produced elements are now consumed.
221  *
222  * @param p_chain
223  * @param num
224  */
225 static inline void
226 qed_chain_return_multi_produced(struct qed_chain *p_chain,
227                                 u16 num)
228 {
229         p_chain->cons_idx += num;
230         test_ans_skip(p_chain, cons_idx);
231 }
232
233 /**
234  * @brief qed_chain_return_produced -
235  *
236  * A chain in which the driver "Produces" elements should use this API
237  * to indicate previous produced elements are now consumed.
238  *
239  * @param p_chain
240  */
241 static inline void qed_chain_return_produced(struct qed_chain *p_chain)
242 {
243         p_chain->cons_idx++;
244         test_ans_skip(p_chain, cons_idx);
245 }
246
247 /**
248  * @brief qed_chain_produce -
249  *
250  * A chain in which the driver "Produces" elements should use this to get
251  * a pointer to the next element which can be "Produced". It's driver
252  * responsibility to validate that the chain has room for new element.
253  *
254  * @param p_chain
255  *
256  * @return void*, a pointer to next element
257  */
258 static inline void *qed_chain_produce(struct qed_chain *p_chain)
259 {
260         void *ret = NULL;
261
262         if ((p_chain->prod_idx & p_chain->elem_per_page_mask) ==
263             p_chain->next_page_mask) {
264                 qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
265                                        &p_chain->prod_idx,
266                                        &p_chain->pbl.prod_page_idx);
267         }
268
269         ret = p_chain->p_prod_elem;
270         p_chain->prod_idx++;
271         p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
272                                         p_chain->elem_size);
273
274         return ret;
275 }
276
/**
 * @brief qed_chain_get_capacity -
 *
 * Get the maximum number of BDs in chain
 *
 * @param p_chain
 *
 * @return u16, number of usable BDs the chain can hold
 */
static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
{
        return p_chain->capacity;
}
291
292 /**
293  * @brief qed_chain_recycle_consumed -
294  *
295  * Returns an element which was previously consumed;
296  * Increments producers so they could be written to FW.
297  *
298  * @param p_chain
299  */
300 static inline void
301 qed_chain_recycle_consumed(struct qed_chain *p_chain)
302 {
303         test_ans_skip(p_chain, prod_idx);
304         p_chain->prod_idx++;
305 }
306
307 /**
308  * @brief qed_chain_consume -
309  *
310  * A Chain in which the driver utilizes data written by a different source
311  * (i.e., FW) should use this to access passed buffers.
312  *
313  * @param p_chain
314  *
315  * @return void*, a pointer to the next buffer written
316  */
317 static inline void *qed_chain_consume(struct qed_chain *p_chain)
318 {
319         void *ret = NULL;
320
321         if ((p_chain->cons_idx & p_chain->elem_per_page_mask) ==
322             p_chain->next_page_mask) {
323                 qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
324                                        &p_chain->cons_idx,
325                                        &p_chain->pbl.cons_page_idx);
326         }
327
328         ret = p_chain->p_cons_elem;
329         p_chain->cons_idx++;
330         p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
331                                         p_chain->elem_size);
332
333         return ret;
334 }
335
336 /**
337  * @brief qed_chain_reset - Resets the chain to its start state
338  *
339  * @param p_chain pointer to a previously allocted chain
340  */
341 static inline void qed_chain_reset(struct qed_chain *p_chain)
342 {
343         int i;
344
345         p_chain->prod_idx       = 0;
346         p_chain->cons_idx       = 0;
347         p_chain->p_cons_elem    = p_chain->p_virt_addr;
348         p_chain->p_prod_elem    = p_chain->p_virt_addr;
349
350         if (p_chain->mode == QED_CHAIN_MODE_PBL) {
351                 p_chain->pbl.prod_page_idx      = p_chain->page_cnt - 1;
352                 p_chain->pbl.cons_page_idx      = p_chain->page_cnt - 1;
353         }
354
355         switch (p_chain->intended_use) {
356         case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
357         case QED_CHAIN_USE_TO_PRODUCE:
358                 /* Do nothing */
359                 break;
360
361         case QED_CHAIN_USE_TO_CONSUME:
362                 /* produce empty elements */
363                 for (i = 0; i < p_chain->capacity; i++)
364                         qed_chain_recycle_consumed(p_chain);
365                 break;
366         }
367 }
368
/**
 * @brief qed_chain_init - Initializes a basic chain struct
 *
 * @param p_chain
 * @param p_virt_addr   virtual address of allocated buffer's beginning
 * @param p_phys_addr   physical address of allocated buffer's beginning
 * @param page_cnt      number of pages in the allocated buffer
 * @param elem_size     size of each element in the chain
 * @param intended_use  initial fill state, see enum qed_chain_use_mode
 * @param mode          page layout, see enum qed_chain_mode
 */
static inline void qed_chain_init(struct qed_chain *p_chain,
                                  void *p_virt_addr,
                                  dma_addr_t p_phys_addr,
                                  u16 page_cnt,
                                  u8 elem_size,
                                  enum qed_chain_use_mode intended_use,
                                  enum qed_chain_mode mode)
{
        /* chain fixed parameters */
        p_chain->p_virt_addr    = p_virt_addr;
        p_chain->p_phys_addr    = p_phys_addr;
        p_chain->elem_size      = elem_size;
        p_chain->page_cnt       = page_cnt;
        p_chain->mode           = mode;

        p_chain->intended_use           = intended_use;
        p_chain->elem_per_page          = ELEMS_PER_PAGE(elem_size);
        p_chain->usable_per_page =
                USABLE_ELEMS_PER_PAGE(elem_size, mode);
        /* capacity counts only usable elements; size counts them all */
        p_chain->capacity               = p_chain->usable_per_page * page_cnt;
        p_chain->size                   = p_chain->elem_per_page * page_cnt;
        p_chain->elem_per_page_mask     = p_chain->elem_per_page - 1;

        p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);

        /* in-page index at which the next page must be entered */
        p_chain->next_page_mask = (p_chain->usable_per_page &
                                   p_chain->elem_per_page_mask);

        if (mode == QED_CHAIN_MODE_NEXT_PTR) {
                struct qed_chain_next   *p_next;
                u16                     i;

                /* Write the link element at the end of every page so each
                 * page points at the one after it.
                 */
                for (i = 0; i < page_cnt - 1; i++) {
                        /* Increment mem_phy to the next page. */
                        p_phys_addr += QED_CHAIN_PAGE_SIZE;

                        /* Initialize the physical address of the next page. */
                        p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
                                                           elem_size *
                                                           p_chain->
                                                           usable_per_page);

                        p_next->next_phys.lo    = DMA_LO_LE(p_phys_addr);
                        p_next->next_phys.hi    = DMA_HI_LE(p_phys_addr);

                        /* Initialize the virtual address of the next page. */
                        p_next->next_virt = (void *)((u8 *)p_virt_addr +
                                                     QED_CHAIN_PAGE_SIZE);

                        /* Move to the next page. */
                        p_virt_addr = p_next->next_virt;
                }

                /* Last page's next should point to beginning of the chain */
                p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
                                                   elem_size *
                                                   p_chain->usable_per_page);

                p_next->next_phys.lo    = DMA_LO_LE(p_chain->p_phys_addr);
                p_next->next_phys.hi    = DMA_HI_LE(p_chain->p_phys_addr);
                p_next->next_virt       = p_chain->p_virt_addr;
        }
        qed_chain_reset(p_chain);
}
444
445 /**
446  * @brief qed_chain_pbl_init - Initalizes a basic pbl chain
447  *        struct
448  * @param p_chain
449  * @param p_virt_addr   virtual address of allocated buffer's beginning
450  * @param p_phys_addr   physical address of allocated buffer's beginning
451  * @param page_cnt      number of pages in the allocated buffer
452  * @param elem_size     size of each element in the chain
453  * @param use_mode
454  * @param p_phys_pbl    pointer to a pre-allocated side table
455  *                      which will hold physical page addresses.
456  * @param p_virt_pbl    pointer to a pre allocated side table
457  *                      which will hold virtual page addresses.
458  */
459 static inline void
460 qed_chain_pbl_init(struct qed_chain *p_chain,
461                    void *p_virt_addr,
462                    dma_addr_t p_phys_addr,
463                    u16 page_cnt,
464                    u8 elem_size,
465                    enum qed_chain_use_mode use_mode,
466                    dma_addr_t p_phys_pbl,
467                    dma_addr_t *p_virt_pbl)
468 {
469         dma_addr_t *p_pbl_dma = p_virt_pbl;
470         int i;
471
472         qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt,
473                        elem_size, use_mode, QED_CHAIN_MODE_PBL);
474
475         p_chain->pbl.p_phys_table = p_phys_pbl;
476         p_chain->pbl.p_virt_table = p_virt_pbl;
477
478         /* Fill the PBL with physical addresses*/
479         for (i = 0; i < page_cnt; i++) {
480                 *p_pbl_dma = p_phys_addr;
481                 p_phys_addr += QED_CHAIN_PAGE_SIZE;
482                 p_pbl_dma++;
483         }
484 }
485
486 /**
487  * @brief qed_chain_set_prod - sets the prod to the given
488  *        value
489  *
490  * @param prod_idx
491  * @param p_prod_elem
492  */
493 static inline void qed_chain_set_prod(struct qed_chain *p_chain,
494                                       u16 prod_idx,
495                                       void *p_prod_elem)
496 {
497         p_chain->prod_idx       = prod_idx;
498         p_chain->p_prod_elem    = p_prod_elem;
499 }
500
501 /**
502  * @brief qed_chain_get_elem -
503  *
504  * get a pointer to an element represented by absolute idx
505  *
506  * @param p_chain
507  * @assumption p_chain->size is a power of 2
508  *
509  * @return void*, a pointer to next element
510  */
511 static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain,
512                                            u16 idx)
513 {
514         void *ret = NULL;
515
516         if (idx >= p_chain->size)
517                 return NULL;
518
519         ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx;
520
521         return ret;
522 }
523
524 /**
525  * @brief qed_chain_sge_inc_cons_prod
526  *
527  * for sge chains, producer isn't increased serially, the ring
528  * is expected to be full at all times. Once elements are
529  * consumed, they are immediately produced.
530  *
531  * @param p_chain
532  * @param cnt
533  *
534  * @return inline void
535  */
536 static inline void
537 qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain,
538                             u16 cnt)
539 {
540         p_chain->prod_idx += cnt;
541         p_chain->cons_idx += cnt;
542 }
543
544 #endif