target/linux/layerscape/patches-5.4/804-crypto-0035-crypto-caam-qi-use-QBMan-NXP-SDK-driver.patch
From 28ef279c55a914372bf41587f6264e8e3e61e7d5 Mon Sep 17 00:00:00 2001
From: Horia Geanta <horia.geanta@nxp.com>
Date: Mon, 12 Jun 2017 19:42:34 +0300
Subject: [PATCH] crypto: caam/qi - use QBMan (NXP) SDK driver
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Update caam/qi to work with the QBMan driver from the NXP SDK.

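The NXP SDK header (linux/fsl_qman.h) exposes the frame descriptor and
FQD fields directly rather than through the upstream accessors. As an
illustrative mapping (taken from the hunks below, not an exhaustive
list):

    /* upstream <soc/fsl/qman.h> */       /* NXP SDK <linux/fsl_qman.h> */
    qm_fd_set_compound(&fd, len);         fd.format = qm_fd_compound;
    qm_fd_addr_set64(&fd, addr);          fd.addr = addr;
    qman_enqueue(fq, &fd);                qman_enqueue(fq, &fd, 0);
    qman_destroy_fq(fq);                  qman_destroy_fq(fq, 0);
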
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>

Squashed in the "crypto: caam/qi - fix FD congestion weight" fix.

Resolved rebase conflicts (see the excerpts below):

drivers/crypto/caam/qi.c:579
    kept the call to dev_err_ratelimited(), but changed the reported
    value to fd->status
drivers/crypto/caam/sg_sw_qm.h:96
    kept the changes from the patch, but changed sg_count to len

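For reference, the resolved lines appear in the hunks below as:

    /* drivers/crypto/caam/qi.c */
    dev_err_ratelimited(qidev,
                        "Error: %#x in CAAM response FD\n",
                        fd->status);

    /* drivers/crypto/caam/sg_sw_qm.h */
    qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
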
Signed-off-by: Vlad Pelin <vlad.pelin@nxp.com>
Acked-by: Horia Geanta <horia.geanta@nxp.com>
---
 drivers/crypto/caam/Kconfig    |  2 +-
 drivers/crypto/caam/qi.c       | 82 +++++++++++++++++++++---------------------
 drivers/crypto/caam/qi.h       |  2 +-
 drivers/crypto/caam/sg_sw_qm.h | 46 ++++++++++++++++--------
 4 files changed, 74 insertions(+), 58 deletions(-)

--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -106,7 +106,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
 
 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
        bool "Queue Interface as Crypto API backend"
-       depends on FSL_DPAA && NET
+       depends on FSL_SDK_DPA && NET
        default y
        select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
        select CRYPTO_AUTHENC
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -9,7 +9,7 @@
 
 #include <linux/cpumask.h>
 #include <linux/kthread.h>
-#include <soc/fsl/qman.h>
+#include <linux/fsl_qman.h>
 
 #include "regs.h"
 #include "qi.h"
@@ -107,23 +107,21 @@ static void *caam_iova_to_virt(struct io
 int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
 {
        struct qm_fd fd;
-       dma_addr_t addr;
        int ret;
        int num_retries = 0;
 
-       qm_fd_clear_fd(&fd);
-       qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
-
-       addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
+       fd.cmd = 0;
+       fd.format = qm_fd_compound;
+       fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
+       fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
                             DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(qidev, addr)) {
+       if (dma_mapping_error(qidev, fd.addr)) {
                dev_err(qidev, "DMA mapping error for QI enqueue request\n");
                return -EIO;
        }
-       qm_fd_addr_set64(&fd, addr);
 
        do {
-               ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
+               ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
                if (likely(!ret))
                        return 0;
 
@@ -139,7 +137,7 @@ int caam_qi_enqueue(struct device *qidev
 EXPORT_SYMBOL(caam_qi_enqueue);
 
 static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
-                          const union qm_mr_entry *msg)
+                          const struct qm_mr_entry *msg)
 {
        const struct qm_fd *fd;
        struct caam_drv_req *drv_req;
@@ -148,7 +146,7 @@ static void caam_fq_ern_cb(struct qman_p
 
        fd = &msg->ern.fd;
 
-       if (qm_fd_get_format(fd) != qm_fd_compound) {
+       if (fd->format != qm_fd_compound) {
                dev_err(qidev, "Non-compound FD from CAAM\n");
                return;
        }
@@ -186,20 +184,22 @@ static struct qman_fq *create_caam_req_f
        req_fq->cb.fqs = NULL;
 
        ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
-                               QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
+                               QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
+                            req_fq);
        if (ret) {
                dev_err(qidev, "Failed to create session req FQ\n");
                goto create_req_fq_fail;
        }
 
-       memset(&opts, 0, sizeof(opts));
-       opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
-                                  QM_INITFQ_WE_CONTEXTB |
-                                  QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
-       opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
-       qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
-       opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
-       qm_fqd_context_a_set64(&opts.fqd, hwdesc);
+       opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
+                      QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
+                      QM_INITFQ_WE_CGID;
+       opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
+       opts.fqd.dest.channel = qm_channel_caam;
+       opts.fqd.dest.wq = 2;
+       opts.fqd.context_b = qman_fq_fqid(rsp_fq);
+       opts.fqd.context_a.hi = upper_32_bits(hwdesc);
+       opts.fqd.context_a.lo = lower_32_bits(hwdesc);
        opts.fqd.cgid = qipriv.cgr.cgrid;
 
        ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
@@ -213,7 +213,7 @@ static struct qman_fq *create_caam_req_f
        return req_fq;
 
 init_req_fq_fail:
-       qman_destroy_fq(req_fq);
+       qman_destroy_fq(req_fq, 0);
 create_req_fq_fail:
        kfree(req_fq);
        return ERR_PTR(ret);
@@ -281,7 +281,7 @@ empty_fq:
        if (ret)
                dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
 
-       qman_destroy_fq(fq);
+       qman_destroy_fq(fq, 0);
        kfree(fq);
 
        return ret;
@@ -298,7 +298,7 @@ static int empty_caam_fq(struct qman_fq
                if (ret)
                        return ret;
 
-               if (!qm_mcr_np_get(&np, frm_cnt))
+               if (!np.frm_cnt)
                        break;
 
                msleep(20);
@@ -565,30 +565,28 @@ static enum qman_cb_dqrr_result caam_rsp
        const struct qm_fd *fd;
        struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
        struct caam_drv_private *priv = dev_get_drvdata(qidev);
-       u32 status;
 
        if (caam_qi_napi_schedule(p, caam_napi))
                return qman_cb_dqrr_stop;
 
        fd = &dqrr->fd;
-       status = be32_to_cpu(fd->status);
-       if (unlikely(status)) {
-               u32 ssrc = status & JRSTA_SSRC_MASK;
-               u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+       if (unlikely(fd->status)) {
+               u32 ssrc = fd->status & JRSTA_SSRC_MASK;
+               u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
 
                if (ssrc != JRSTA_SSRC_CCB_ERROR ||
                    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
                        dev_err_ratelimited(qidev,
                                            "Error: %#x in CAAM response FD\n",
-                                           status);
+                                           fd->status);
        }
 
-       if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
+       if (unlikely(fd->format != qm_fd_compound)) {
                dev_err(qidev, "Non-compound FD from CAAM\n");
                return qman_cb_dqrr_consume;
        }
 
-       drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
+       drv_req = caam_iova_to_virt(priv->domain, fd->addr);
        if (unlikely(!drv_req)) {
                dev_err(qidev,
                        "Can't find original request for caam response\n");
@@ -598,7 +596,7 @@ static enum qman_cb_dqrr_result caam_rsp
        dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
                         sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
 
-       drv_req->cbk(drv_req, status);
+       drv_req->cbk(drv_req, fd->status);
        return qman_cb_dqrr_consume;
 }
 
@@ -622,17 +620,18 @@ static int alloc_rsp_fq_cpu(struct devic
                return -ENODEV;
        }
 
-       memset(&opts, 0, sizeof(opts));
-       opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
-                                  QM_INITFQ_WE_CONTEXTB |
-                                  QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
-       opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
-                                      QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
-       qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
+       opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
+               QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
+               QM_INITFQ_WE_CGID;
+       opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
+                          QM_FQCTRL_CGE;
+       opts.fqd.dest.channel = qman_affine_channel(cpu);
+       opts.fqd.dest.wq = 3;
        opts.fqd.cgid = qipriv.cgr.cgrid;
        opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
                                                QM_STASHING_EXCL_DATA;
-       qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
+       opts.fqd.context_a.stashing.data_cl = 1;
+       opts.fqd.context_a.stashing.context_cl = 1;
 
        ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
        if (ret) {
@@ -662,8 +661,7 @@ static int init_cgr(struct device *qidev
 
        qipriv.cgr.cb = cgr_cb;
        memset(&opts, 0, sizeof(opts));
-       opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
-                                  QM_CGR_WE_MODE);
+       opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
        opts.cgr.cscn_en = QM_CGR_EN;
        opts.cgr.mode = QMAN_CGR_MODE_FRAME;
        qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -9,7 +9,7 @@
 #ifndef __QI_H__
 #define __QI_H__
 
-#include <soc/fsl/qman.h>
+#include <linux/fsl_qman.h>
 #include "compat.h"
 #include "desc.h"
 #include "desc_constr.h"
--- a/drivers/crypto/caam/sg_sw_qm.h
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -7,46 +7,61 @@
 #ifndef __SG_SW_QM_H
 #define __SG_SW_QM_H
 
-#include <soc/fsl/qman.h>
+#include <linux/fsl_qman.h>
 #include "regs.h"
 
+static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
+{
+       dma_addr_t addr = qm_sg_ptr->opaque;
+
+       qm_sg_ptr->opaque = cpu_to_caam64(addr);
+       qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
+}
+
 static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
-                                 u16 offset)
+                                 u32 len, u16 offset)
 {
-       qm_sg_entry_set64(qm_sg_ptr, dma);
+       qm_sg_ptr->addr = dma;
+       qm_sg_ptr->length = len;
        qm_sg_ptr->__reserved2 = 0;
        qm_sg_ptr->bpid = 0;
-       qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
+       qm_sg_ptr->__reserved3 = 0;
+       qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
+
+       cpu_to_hw_sg(qm_sg_ptr);
 }
 
 static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
                                    dma_addr_t dma, u32 len, u16 offset)
 {
-       __dma_to_qm_sg(qm_sg_ptr, dma, offset);
-       qm_sg_entry_set_len(qm_sg_ptr, len);
+       qm_sg_ptr->extension = 0;
+       qm_sg_ptr->final = 0;
+       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
 }
 
 static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
                                         dma_addr_t dma, u32 len, u16 offset)
 {
-       __dma_to_qm_sg(qm_sg_ptr, dma, offset);
-       qm_sg_entry_set_f(qm_sg_ptr, len);
+       qm_sg_ptr->extension = 0;
+       qm_sg_ptr->final = 1;
+       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
 }
 
 static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
                                        dma_addr_t dma, u32 len, u16 offset)
 {
-       __dma_to_qm_sg(qm_sg_ptr, dma, offset);
-       qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
+       qm_sg_ptr->extension = 1;
+       qm_sg_ptr->final = 0;
+       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
 }
 
 static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
                                             dma_addr_t dma, u32 len,
                                             u16 offset)
 {
-       __dma_to_qm_sg(qm_sg_ptr, dma, offset);
-       qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
-                                    (len & QM_SG_LEN_MASK));
+       qm_sg_ptr->extension = 1;
+       qm_sg_ptr->final = 1;
+       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
 }
 
 /*
@@ -79,7 +94,10 @@ static inline void sg_to_qm_sg_last(stru
                                    struct qm_sg_entry *qm_sg_ptr, u16 offset)
 {
        qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
-       qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
+
+       qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
+       qm_sg_ptr->final = 1;
+       qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
 }
 
 #endif /* __SG_SW_QM_H */