1 /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
2  *
3  * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
4  */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/of.h>
11 #include <linux/of_device.h>
12 #include <linux/cpumask.h>
13 #include <linux/slab.h>
14 #include <linux/interrupt.h>
15 #include <linux/crypto.h>
16 #include <crypto/md5.h>
17 #include <crypto/sha.h>
18 #include <crypto/aes.h>
19 #include <crypto/des.h>
20 #include <linux/mutex.h>
21 #include <linux/delay.h>
22 #include <linux/sched.h>
23
24 #include <crypto/internal/hash.h>
25 #include <crypto/scatterwalk.h>
26 #include <crypto/algapi.h>
27
28 #include <asm/hypervisor.h>
29 #include <asm/mdesc.h>
30
31 #include "n2_core.h"
32
33 #define DRV_MODULE_NAME         "n2_crypto"
34 #define DRV_MODULE_VERSION      "0.2"
35 #define DRV_MODULE_RELDATE      "July 28, 2011"
36
37 static const char version[] =
38         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
39
40 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
41 MODULE_DESCRIPTION("Niagara2 Crypto driver");
42 MODULE_LICENSE("GPL");
43 MODULE_VERSION(DRV_MODULE_VERSION);
44
45 #define N2_CRA_PRIORITY         200
46
47 static DEFINE_MUTEX(spu_lock);
48
49 struct spu_queue {
50         cpumask_t               sharing;
51         unsigned long           qhandle;
52
53         spinlock_t              lock;
54         u8                      q_type;
55         void                    *q;
56         unsigned long           head;
57         unsigned long           tail;
58         struct list_head        jobs;
59
60         unsigned long           devino;
61
62         char                    irq_name[32];
63         unsigned int            irq;
64
65         struct list_head        list;
66 };
67
68 static struct spu_queue **cpu_to_cwq;
69 static struct spu_queue **cpu_to_mau;
70
71 static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
72 {
73         if (q->q_type == HV_NCS_QTYPE_MAU) {
74                 off += MAU_ENTRY_SIZE;
75                 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
76                         off = 0;
77         } else {
78                 off += CWQ_ENTRY_SIZE;
79                 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
80                         off = 0;
81         }
82         return off;
83 }
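
/* Illustrative example (sizes assumed purely for the arithmetic: 64-byte
 * CWQ entries, 64 entries per queue): advancing from offset 0xf80 yields
 * 0xfc0, and advancing once more from 0xfc0 wraps back to offset 0.
 */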
84
85 struct n2_request_common {
86         struct list_head        entry;
87         unsigned int            offset;
88 };
89 #define OFFSET_NOT_RUNNING      (~(unsigned int)0)
90
91 /* An async job request records the final tail value it used in
92  * n2_request_common->offset; test whether that offset lies in the
93  * range (old_head, new_head], i.e. excluding old_head, including new_head.
94  */
95 static inline bool job_finished(struct spu_queue *q, unsigned int offset,
96                                 unsigned long old_head, unsigned long new_head)
97 {
98         if (old_head <= new_head) {
99                 if (offset > old_head && offset <= new_head)
100                         return true;
101         } else {
102                 if (offset > old_head || offset <= new_head)
103                         return true;
104         }
105         return false;
106 }
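
/* Worked example with purely illustrative offsets (64-byte entries
 * assumed): after the queue wraps, old_head == 0x3c0 and new_head == 0x080.
 * Offsets 0x000, 0x040 and 0x080 satisfy
 * (offset > old_head || offset <= new_head) and are reported finished;
 * offsets 0x0c0 through 0x3c0 are still pending.
 */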
107
108 /* When the HEAD marker is unequal to the actual HEAD, we get
109  * a virtual device INO interrupt.  We should process the
110  * completed CWQ entries and adjust the HEAD marker to clear
111  * the IRQ.
112  */
113 static irqreturn_t cwq_intr(int irq, void *dev_id)
114 {
115         unsigned long off, new_head, hv_ret;
116         struct spu_queue *q = dev_id;
117
118         pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
119                smp_processor_id(), q->qhandle);
120
121         spin_lock(&q->lock);
122
123         hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
124
125         pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
126                smp_processor_id(), new_head, hv_ret);
127
128         for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
129                 /* XXX ... XXX */
130         }
131
132         hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
133         if (hv_ret == HV_EOK)
134                 q->head = new_head;
135
136         spin_unlock(&q->lock);
137
138         return IRQ_HANDLED;
139 }
140
141 static irqreturn_t mau_intr(int irq, void *dev_id)
142 {
143         struct spu_queue *q = dev_id;
144         unsigned long head, hv_ret;
145
146         spin_lock(&q->lock);
147
148         pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
149                smp_processor_id(), q->qhandle);
150
151         hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
152
153         pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
154                smp_processor_id(), head, hv_ret);
155
156         sun4v_ncs_sethead_marker(q->qhandle, head);
157
158         spin_unlock(&q->lock);
159
160         return IRQ_HANDLED;
161 }
162
163 static void *spu_queue_next(struct spu_queue *q, void *cur)
164 {
165         return q->q + spu_next_offset(q, cur - q->q);
166 }
167
168 static int spu_queue_num_free(struct spu_queue *q)
169 {
170         unsigned long head = q->head;
171         unsigned long tail = q->tail;
172         unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
173         unsigned long diff;
174
175         if (head > tail)
176                 diff = head - tail;
177         else
178                 diff = (end - tail) + head;
179
180         return (diff / CWQ_ENTRY_SIZE) - 1;
181 }
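
/* Worked example (sizes assumed purely for illustration: 64-byte CWQ
 * entries, 64 entries per queue, so end == 0x1000): with head == tail the
 * queue is empty, diff == 0x1000 and the function returns 64 - 1 == 63.
 * One slot is always left unclaimed so that a completely full queue (tail
 * about to catch head) stays distinguishable from an empty one.
 */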
182
183 static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
184 {
185         int avail = spu_queue_num_free(q);
186
187         if (avail >= num_entries)
188                 return q->q + q->tail;
189
190         return NULL;
191 }
192
193 static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
194 {
195         unsigned long hv_ret, new_tail;
196
197         new_tail = spu_next_offset(q, last - q->q);
198
199         hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
200         if (hv_ret == HV_EOK)
201                 q->tail = new_tail;
202         return hv_ret;
203 }
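
/* Typical submission pattern (see __n2_crypt_chunk() below): reserve
 * room, fill the entries in order, then advance the hardware tail just
 * past the last entry written:
 *
 *      ent = spu_queue_alloc(qp, num_entries);
 *      if (!ent)
 *              return -EBUSY;
 *      ... fill *ent, stepping with ent = spu_queue_next(qp, ent) ...
 *      hv_ret = spu_queue_submit(qp, ent);  (ent == last entry filled)
 */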
204
205 static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
206                              int enc_type, int auth_type,
207                              unsigned int hash_len,
208                              bool sfas, bool sob, bool eob, bool encrypt,
209                              int opcode)
210 {
211         u64 word = (len - 1) & CONTROL_LEN;
212
213         word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
214         word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
215         word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
216         if (sfas)
217                 word |= CONTROL_STORE_FINAL_AUTH_STATE;
218         if (sob)
219                 word |= CONTROL_START_OF_BLOCK;
220         if (eob)
221                 word |= CONTROL_END_OF_BLOCK;
222         if (encrypt)
223                 word |= CONTROL_ENCRYPT;
224         if (hmac_key_len)
225                 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
226         if (hash_len)
227                 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
228
229         return word;
230 }
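
/* Example: the first entry of a hash-only (non-HMAC) operation (see
 * n2_do_async_digest() below) builds its control word as
 *
 *      control_word_base(nbytes, 0, 0, auth_type, digest_size,
 *                        false, true, false, false,
 *                        OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
 *
 * i.e. start-of-block set, with CONTROL_END_OF_BLOCK OR'ed in later once
 * the final descriptor of the request has been filled.
 */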
231
232 #if 0
233 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
234 {
235         if (this_len >= 64 ||
236             qp->head != qp->tail)
237                 return true;
238         return false;
239 }
240 #endif
241
242 struct n2_ahash_alg {
243         struct list_head        entry;
244         const char              *hash_zero;
245         const u32               *hash_init;
246         u8                      hw_op_hashsz;
247         u8                      digest_size;
248         u8                      auth_type;
249         u8                      hmac_type;
250         struct ahash_alg        alg;
251 };
252
253 static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
254 {
255         struct crypto_alg *alg = tfm->__crt_alg;
256         struct ahash_alg *ahash_alg;
257
258         ahash_alg = container_of(alg, struct ahash_alg, halg.base);
259
260         return container_of(ahash_alg, struct n2_ahash_alg, alg);
261 }
262
263 struct n2_hmac_alg {
264         const char              *child_alg;
265         struct n2_ahash_alg     derived;
266 };
267
268 static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
269 {
270         struct crypto_alg *alg = tfm->__crt_alg;
271         struct ahash_alg *ahash_alg;
272
273         ahash_alg = container_of(alg, struct ahash_alg, halg.base);
274
275         return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
276 }
277
278 struct n2_hash_ctx {
279         struct crypto_ahash             *fallback_tfm;
280 };
281
282 #define N2_HASH_KEY_MAX                 32 /* HW limit for all HMAC requests */
283
284 struct n2_hmac_ctx {
285         struct n2_hash_ctx              base;
286
287         struct crypto_shash             *child_shash;
288
289         int                             hash_key_len;
290         unsigned char                   hash_key[N2_HASH_KEY_MAX];
291 };
292
293 struct n2_hash_req_ctx {
294         union {
295                 struct md5_state        md5;
296                 struct sha1_state       sha1;
297                 struct sha256_state     sha256;
298         } u;
299
300         struct ahash_request            fallback_req;
301 };
302
303 static int n2_hash_async_init(struct ahash_request *req)
304 {
305         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
306         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
307         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
308
309         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
310         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
311
312         return crypto_ahash_init(&rctx->fallback_req);
313 }
314
315 static int n2_hash_async_update(struct ahash_request *req)
316 {
317         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
318         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
319         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
320
321         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
322         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
323         rctx->fallback_req.nbytes = req->nbytes;
324         rctx->fallback_req.src = req->src;
325
326         return crypto_ahash_update(&rctx->fallback_req);
327 }
328
329 static int n2_hash_async_final(struct ahash_request *req)
330 {
331         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
332         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
333         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
334
335         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
336         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
337         rctx->fallback_req.result = req->result;
338
339         return crypto_ahash_final(&rctx->fallback_req);
340 }
341
342 static int n2_hash_async_finup(struct ahash_request *req)
343 {
344         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
345         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
346         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
347
348         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
349         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
350         rctx->fallback_req.nbytes = req->nbytes;
351         rctx->fallback_req.src = req->src;
352         rctx->fallback_req.result = req->result;
353
354         return crypto_ahash_finup(&rctx->fallback_req);
355 }
356
357 static int n2_hash_cra_init(struct crypto_tfm *tfm)
358 {
359         const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
360         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
361         struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
362         struct crypto_ahash *fallback_tfm;
363         int err;
364
365         fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
366                                           CRYPTO_ALG_NEED_FALLBACK);
367         if (IS_ERR(fallback_tfm)) {
368                 pr_warning("Fallback driver '%s' could not be loaded!\n",
369                            fallback_driver_name);
370                 err = PTR_ERR(fallback_tfm);
371                 goto out;
372         }
373
374         crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
375                                          crypto_ahash_reqsize(fallback_tfm)));
376
377         ctx->fallback_tfm = fallback_tfm;
378         return 0;
379
380 out:
381         return err;
382 }
383
384 static void n2_hash_cra_exit(struct crypto_tfm *tfm)
385 {
386         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
387         struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
388
389         crypto_free_ahash(ctx->fallback_tfm);
390 }
391
392 static int n2_hmac_cra_init(struct crypto_tfm *tfm)
393 {
394         const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
395         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
396         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
397         struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
398         struct crypto_ahash *fallback_tfm;
399         struct crypto_shash *child_shash;
400         int err;
401
402         fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
403                                           CRYPTO_ALG_NEED_FALLBACK);
404         if (IS_ERR(fallback_tfm)) {
405                 pr_warning("Fallback driver '%s' could not be loaded!\n",
406                            fallback_driver_name);
407                 err = PTR_ERR(fallback_tfm);
408                 goto out;
409         }
410
411         child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
412         if (IS_ERR(child_shash)) {
413                 pr_warning("Child shash '%s' could not be loaded!\n",
414                            n2alg->child_alg);
415                 err = PTR_ERR(child_shash);
416                 goto out_free_fallback;
417         }
418
419         crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
420                                          crypto_ahash_reqsize(fallback_tfm)));
421
422         ctx->child_shash = child_shash;
423         ctx->base.fallback_tfm = fallback_tfm;
424         return 0;
425
426 out_free_fallback:
427         crypto_free_ahash(fallback_tfm);
428
429 out:
430         return err;
431 }
432
433 static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
434 {
435         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
436         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
437
438         crypto_free_ahash(ctx->base.fallback_tfm);
439         crypto_free_shash(ctx->child_shash);
440 }
441
442 static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
443                                 unsigned int keylen)
444 {
445         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
446         struct crypto_shash *child_shash = ctx->child_shash;
447         struct crypto_ahash *fallback_tfm;
448         SHASH_DESC_ON_STACK(shash, child_shash);
449         int err, bs, ds;
450
451         fallback_tfm = ctx->base.fallback_tfm;
452         err = crypto_ahash_setkey(fallback_tfm, key, keylen);
453         if (err)
454                 return err;
455
456         shash->tfm = child_shash;
457         shash->flags = crypto_ahash_get_flags(tfm) &
458                 CRYPTO_TFM_REQ_MAY_SLEEP;
459
460         bs = crypto_shash_blocksize(child_shash);
461         ds = crypto_shash_digestsize(child_shash);
462         BUG_ON(ds > N2_HASH_KEY_MAX);
463         if (keylen > bs) {
464                 err = crypto_shash_digest(shash, key, keylen,
465                                           ctx->hash_key);
466                 if (err)
467                         return err;
468                 keylen = ds;
469         } else if (keylen <= N2_HASH_KEY_MAX)
470                 memcpy(ctx->hash_key, key, keylen);
471
472         ctx->hash_key_len = keylen;
473
474         return err;
475 }
476
477 static unsigned long wait_for_tail(struct spu_queue *qp)
478 {
479         unsigned long head, hv_ret;
480
481         do {
482                 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
483                 if (hv_ret != HV_EOK) {
484                         pr_err("Hypervisor error on gethead\n");
485                         break;
486                 }
487                 if (head == qp->tail) {
488                         qp->head = head;
489                         break;
490                 }
491         } while (1);
492         return hv_ret;
493 }
494
495 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
496                                               struct cwq_initial_entry *ent)
497 {
498         unsigned long hv_ret = spu_queue_submit(qp, ent);
499
500         if (hv_ret == HV_EOK)
501                 hv_ret = wait_for_tail(qp);
502
503         return hv_ret;
504 }
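
/* Note that completion is handled synchronously: after submitting, the
 * driver polls the hardware head pointer until it catches up with the
 * queue tail (wait_for_tail() above) rather than waiting for the CWQ
 * interrupt, whose handler (cwq_intr()) currently only acknowledges the
 * IRQ and updates the head marker.
 */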
505
506 static int n2_do_async_digest(struct ahash_request *req,
507                               unsigned int auth_type, unsigned int digest_size,
508                               unsigned int result_size, void *hash_loc,
509                               unsigned long auth_key, unsigned int auth_key_len)
510 {
511         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
512         struct cwq_initial_entry *ent;
513         struct crypto_hash_walk walk;
514         struct spu_queue *qp;
515         unsigned long flags;
516         int err = -ENODEV;
517         int nbytes, cpu;
518
519         /* The total effective length of the operation may not
520          * exceed 2^16.
521          */
522         if (unlikely(req->nbytes > (1 << 16))) {
523                 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
524                 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
525
526                 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
527                 rctx->fallback_req.base.flags =
528                         req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
529                 rctx->fallback_req.nbytes = req->nbytes;
530                 rctx->fallback_req.src = req->src;
531                 rctx->fallback_req.result = req->result;
532
533                 return crypto_ahash_digest(&rctx->fallback_req);
534         }
535
536         nbytes = crypto_hash_walk_first(req, &walk);
537
538         cpu = get_cpu();
539         qp = cpu_to_cwq[cpu];
540         if (!qp)
541                 goto out;
542
543         spin_lock_irqsave(&qp->lock, flags);
544
545         /* XXX can do better, improve this later by doing a by-hand scatterlist
546          * XXX walk, etc.
547          */
548         ent = qp->q + qp->tail;
549
550         ent->control = control_word_base(nbytes, auth_key_len, 0,
551                                          auth_type, digest_size,
552                                          false, true, false, false,
553                                          OPCODE_INPLACE_BIT |
554                                          OPCODE_AUTH_MAC);
555         ent->src_addr = __pa(walk.data);
556         ent->auth_key_addr = auth_key;
557         ent->auth_iv_addr = __pa(hash_loc);
558         ent->final_auth_state_addr = 0UL;
559         ent->enc_key_addr = 0UL;
560         ent->enc_iv_addr = 0UL;
561         ent->dest_addr = __pa(hash_loc);
562
563         nbytes = crypto_hash_walk_done(&walk, 0);
564         while (nbytes > 0) {
565                 ent = spu_queue_next(qp, ent);
566
567                 ent->control = (nbytes - 1);
568                 ent->src_addr = __pa(walk.data);
569                 ent->auth_key_addr = 0UL;
570                 ent->auth_iv_addr = 0UL;
571                 ent->final_auth_state_addr = 0UL;
572                 ent->enc_key_addr = 0UL;
573                 ent->enc_iv_addr = 0UL;
574                 ent->dest_addr = 0UL;
575
576                 nbytes = crypto_hash_walk_done(&walk, 0);
577         }
578         ent->control |= CONTROL_END_OF_BLOCK;
579
580         if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
581                 err = -EINVAL;
582         else
583                 err = 0;
584
585         spin_unlock_irqrestore(&qp->lock, flags);
586
587         if (!err)
588                 memcpy(req->result, hash_loc, result_size);
589 out:
590         put_cpu();
591
592         return err;
593 }
594
595 static int n2_hash_async_digest(struct ahash_request *req)
596 {
597         struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
598         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
599         int ds;
600
601         ds = n2alg->digest_size;
602         if (unlikely(req->nbytes == 0)) {
603                 memcpy(req->result, n2alg->hash_zero, ds);
604                 return 0;
605         }
606         memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
607
608         return n2_do_async_digest(req, n2alg->auth_type,
609                                   n2alg->hw_op_hashsz, ds,
610                                   &rctx->u, 0UL, 0);
611 }
612
613 static int n2_hmac_async_digest(struct ahash_request *req)
614 {
615         struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
616         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
617         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
618         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
619         int ds;
620
621         ds = n2alg->derived.digest_size;
622         if (unlikely(req->nbytes == 0) ||
623             unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
624                 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
625                 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
626
627                 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
628                 rctx->fallback_req.base.flags =
629                         req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
630                 rctx->fallback_req.nbytes = req->nbytes;
631                 rctx->fallback_req.src = req->src;
632                 rctx->fallback_req.result = req->result;
633
634                 return crypto_ahash_digest(&rctx->fallback_req);
635         }
636         memcpy(&rctx->u, n2alg->derived.hash_init,
637                n2alg->derived.hw_op_hashsz);
638
639         return n2_do_async_digest(req, n2alg->derived.hmac_type,
640                                   n2alg->derived.hw_op_hashsz, ds,
641                                   &rctx->u,
642                                   __pa(&ctx->hash_key),
643                                   ctx->hash_key_len);
644 }
645
646 struct n2_cipher_context {
647         int                     key_len;
648         int                     enc_type;
649         union {
650                 u8              aes[AES_MAX_KEY_SIZE];
651                 u8              des[DES_KEY_SIZE];
652                 u8              des3[3 * DES_KEY_SIZE];
653                 u8              arc4[258]; /* S-box, X, Y */
654         } key;
655 };
656
657 #define N2_CHUNK_ARR_LEN        16
658
659 struct n2_crypto_chunk {
660         struct list_head        entry;
661         unsigned long           iv_paddr : 44;
662         unsigned long           arr_len : 20;
663         unsigned long           dest_paddr;
664         unsigned long           dest_final;
665         struct {
666                 unsigned long   src_paddr : 44;
667                 unsigned long   src_len : 20;
668         } arr[N2_CHUNK_ARR_LEN];
669 };
670
671 struct n2_request_context {
672         struct ablkcipher_walk  walk;
673         struct list_head        chunk_list;
674         struct n2_crypto_chunk  chunk;
675         u8                      temp_iv[16];
676 };
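
/* n2_compute_chunks() below carves the ablkcipher walk up into these
 * chunk records: each chunk covers at most N2_CHUNK_ARR_LEN scatterlist
 * segments and at most 2^16 bytes, and a fresh chunk is started whenever
 * the destination stops being physically contiguous or the operation
 * flips between in-place and out-of-place.
 */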
677
678 /* The SPU allows some level of flexibility for partial cipher blocks
679  * being specified in a descriptor.
680  *
681  * It merely requires that every descriptor's length field is at least
682  * as large as the cipher block size.  This means that a cipher block
683  * can span at most 2 descriptors.  However, this does not allow a
684  * partial block to span into the final descriptor as that would
685  * violate the rule (since every descriptor's length must be at least
686  * the block size).  So, for example, assuming an 8 byte block size:
687  *
688  *      0xe --> 0xa --> 0x8
689  *
690  * is a valid length sequence, whereas:
691  *
692  *      0xe --> 0xb --> 0x7
693  *
694  * is not a valid sequence.
695  */
696
697 struct n2_cipher_alg {
698         struct list_head        entry;
699         u8                      enc_type;
700         struct crypto_alg       alg;
701 };
702
703 static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
704 {
705         struct crypto_alg *alg = tfm->__crt_alg;
706
707         return container_of(alg, struct n2_cipher_alg, alg);
708 }
709
710 struct n2_cipher_request_context {
711         struct ablkcipher_walk  walk;
712 };
713
714 static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
715                          unsigned int keylen)
716 {
717         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
718         struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
719         struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
720
721         ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
722
723         switch (keylen) {
724         case AES_KEYSIZE_128:
725                 ctx->enc_type |= ENC_TYPE_ALG_AES128;
726                 break;
727         case AES_KEYSIZE_192:
728                 ctx->enc_type |= ENC_TYPE_ALG_AES192;
729                 break;
730         case AES_KEYSIZE_256:
731                 ctx->enc_type |= ENC_TYPE_ALG_AES256;
732                 break;
733         default:
734                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
735                 return -EINVAL;
736         }
737
738         ctx->key_len = keylen;
739         memcpy(ctx->key.aes, key, keylen);
740         return 0;
741 }
742
743 static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
744                          unsigned int keylen)
745 {
746         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
747         struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
748         struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
749         u32 tmp[DES_EXPKEY_WORDS];
750         int err;
751
752         ctx->enc_type = n2alg->enc_type;
753
754         if (keylen != DES_KEY_SIZE) {
755                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
756                 return -EINVAL;
757         }
758
759         err = des_ekey(tmp, key);
760         if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
761                 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
762                 return -EINVAL;
763         }
764
765         ctx->key_len = keylen;
766         memcpy(ctx->key.des, key, keylen);
767         return 0;
768 }
769
770 static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
771                           unsigned int keylen)
772 {
773         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
774         struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
775         struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
776
777         ctx->enc_type = n2alg->enc_type;
778
779         if (keylen != (3 * DES_KEY_SIZE)) {
780                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
781                 return -EINVAL;
782         }
783         ctx->key_len = keylen;
784         memcpy(ctx->key.des3, key, keylen);
785         return 0;
786 }
787
788 static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
789                           unsigned int keylen)
790 {
791         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
792         struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
793         struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
794         u8 *s = ctx->key.arc4;
795         u8 *x = s + 256;
796         u8 *y = x + 1;
797         int i, j, k;
798
799         ctx->enc_type = n2alg->enc_type;
800
801         j = k = 0;
802         *x = 0;
803         *y = 0;
804         for (i = 0; i < 256; i++)
805                 s[i] = i;
806         for (i = 0; i < 256; i++) {
807                 u8 a = s[i];
808                 j = (j + key[k] + a) & 0xff;
809                 s[i] = s[j];
810                 s[j] = a;
811                 if (++k >= keylen)
812                         k = 0;
813         }
814
815         return 0;
816 }
817
818 static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
819 {
820         int this_len = nbytes;
821
822         this_len -= (nbytes & (block_size - 1));
823         return this_len > (1 << 16) ? (1 << 16) : this_len;
824 }
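
/* Example, using the 8-byte block size from the comment above:
 * cipher_descriptor_len(0xe, 8) == 0x8, deferring the trailing partial
 * block to the next descriptor; the result is additionally capped at the
 * hardware's 2^16-byte per-descriptor limit.
 */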
825
826 static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
827                             struct spu_queue *qp, bool encrypt)
828 {
829         struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
830         struct cwq_initial_entry *ent;
831         bool in_place;
832         int i;
833
834         ent = spu_queue_alloc(qp, cp->arr_len);
835         if (!ent) {
836                 pr_info("queue_alloc() of %d fails\n",
837                         cp->arr_len);
838                 return -EBUSY;
839         }
840
841         in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
842
843         ent->control = control_word_base(cp->arr[0].src_len,
844                                          0, ctx->enc_type, 0, 0,
845                                          false, true, false, encrypt,
846                                          OPCODE_ENCRYPT |
847                                          (in_place ? OPCODE_INPLACE_BIT : 0));
848         ent->src_addr = cp->arr[0].src_paddr;
849         ent->auth_key_addr = 0UL;
850         ent->auth_iv_addr = 0UL;
851         ent->final_auth_state_addr = 0UL;
852         ent->enc_key_addr = __pa(&ctx->key);
853         ent->enc_iv_addr = cp->iv_paddr;
854         ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
855
856         for (i = 1; i < cp->arr_len; i++) {
857                 ent = spu_queue_next(qp, ent);
858
859                 ent->control = cp->arr[i].src_len - 1;
860                 ent->src_addr = cp->arr[i].src_paddr;
861                 ent->auth_key_addr = 0UL;
862                 ent->auth_iv_addr = 0UL;
863                 ent->final_auth_state_addr = 0UL;
864                 ent->enc_key_addr = 0UL;
865                 ent->enc_iv_addr = 0UL;
866                 ent->dest_addr = 0UL;
867         }
868         ent->control |= CONTROL_END_OF_BLOCK;
869
870         return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
871 }
872
873 static int n2_compute_chunks(struct ablkcipher_request *req)
874 {
875         struct n2_request_context *rctx = ablkcipher_request_ctx(req);
876         struct ablkcipher_walk *walk = &rctx->walk;
877         struct n2_crypto_chunk *chunk;
878         unsigned long dest_prev;
879         unsigned int tot_len;
880         bool prev_in_place;
881         int err, nbytes;
882
883         ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
884         err = ablkcipher_walk_phys(req, walk);
885         if (err)
886                 return err;
887
888         INIT_LIST_HEAD(&rctx->chunk_list);
889
890         chunk = &rctx->chunk;
891         INIT_LIST_HEAD(&chunk->entry);
892
893         chunk->iv_paddr = 0UL;
894         chunk->arr_len = 0;
895         chunk->dest_paddr = 0UL;
896
897         prev_in_place = false;
898         dest_prev = ~0UL;
899         tot_len = 0;
900
901         while ((nbytes = walk->nbytes) != 0) {
902                 unsigned long dest_paddr, src_paddr;
903                 bool in_place;
904                 int this_len;
905
906                 src_paddr = (page_to_phys(walk->src.page) +
907                              walk->src.offset);
908                 dest_paddr = (page_to_phys(walk->dst.page) +
909                               walk->dst.offset);
910                 in_place = (src_paddr == dest_paddr);
911                 this_len = cipher_descriptor_len(nbytes, walk->blocksize);
912
913                 if (chunk->arr_len != 0) {
914                         if (in_place != prev_in_place ||
915                             (!prev_in_place &&
916                              dest_paddr != dest_prev) ||
917                             chunk->arr_len == N2_CHUNK_ARR_LEN ||
918                             tot_len + this_len > (1 << 16)) {
919                                 chunk->dest_final = dest_prev;
920                                 list_add_tail(&chunk->entry,
921                                               &rctx->chunk_list);
922                                 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
923                                 if (!chunk) {
924                                         err = -ENOMEM;
925                                         break;
926                                 }
927                                 INIT_LIST_HEAD(&chunk->entry);
928                         }
929                 }
930                 if (chunk->arr_len == 0) {
931                         chunk->dest_paddr = dest_paddr;
932                         tot_len = 0;
933                 }
934                 chunk->arr[chunk->arr_len].src_paddr = src_paddr;
935                 chunk->arr[chunk->arr_len].src_len = this_len;
936                 chunk->arr_len++;
937
938                 dest_prev = dest_paddr + this_len;
939                 prev_in_place = in_place;
940                 tot_len += this_len;
941
942                 err = ablkcipher_walk_done(req, walk, nbytes - this_len);
943                 if (err)
944                         break;
945         }
946         if (!err && chunk->arr_len != 0) {
947                 chunk->dest_final = dest_prev;
948                 list_add_tail(&chunk->entry, &rctx->chunk_list);
949         }
950
951         return err;
952 }
953
954 static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
955 {
956         struct n2_request_context *rctx = ablkcipher_request_ctx(req);
957         struct n2_crypto_chunk *c, *tmp;
958
959         if (final_iv)
960                 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
961
962         ablkcipher_walk_complete(&rctx->walk);
963         list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
964                 list_del(&c->entry);
965                 if (unlikely(c != &rctx->chunk))
966                         kfree(c);
967         }
968
969 }
970
971 static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
972 {
973         struct n2_request_context *rctx = ablkcipher_request_ctx(req);
974         struct crypto_tfm *tfm = req->base.tfm;
975         int err = n2_compute_chunks(req);
976         struct n2_crypto_chunk *c, *tmp;
977         unsigned long flags, hv_ret;
978         struct spu_queue *qp;
979
980         if (err)
981                 return err;
982
983         qp = cpu_to_cwq[get_cpu()];
984         err = -ENODEV;
985         if (!qp)
986                 goto out;
987
988         spin_lock_irqsave(&qp->lock, flags);
989
990         list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
991                 err = __n2_crypt_chunk(tfm, c, qp, encrypt);
992                 if (err)
993                         break;
994                 list_del(&c->entry);
995                 if (unlikely(c != &rctx->chunk))
996                         kfree(c);
997         }
998         if (!err) {
999                 hv_ret = wait_for_tail(qp);
1000                 if (hv_ret != HV_EOK)
1001                         err = -EINVAL;
1002         }
1003
1004         spin_unlock_irqrestore(&qp->lock, flags);
1005
1006 out:
1007         put_cpu();
1008
1009         n2_chunk_complete(req, NULL);
1010         return err;
1011 }
1012
1013 static int n2_encrypt_ecb(struct ablkcipher_request *req)
1014 {
1015         return n2_do_ecb(req, true);
1016 }
1017
1018 static int n2_decrypt_ecb(struct ablkcipher_request *req)
1019 {
1020         return n2_do_ecb(req, false);
1021 }
1022
1023 static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
1024 {
1025         struct n2_request_context *rctx = ablkcipher_request_ctx(req);
1026         struct crypto_tfm *tfm = req->base.tfm;
1027         unsigned long flags, hv_ret, iv_paddr;
1028         int err = n2_compute_chunks(req);
1029         struct n2_crypto_chunk *c, *tmp;
1030         struct spu_queue *qp;
1031         void *final_iv_addr;
1032
1033         final_iv_addr = NULL;
1034
1035         if (err)
1036                 return err;
1037
1038         qp = cpu_to_cwq[get_cpu()];
1039         err = -ENODEV;
1040         if (!qp)
1041                 goto out;
1042
1043         spin_lock_irqsave(&qp->lock, flags);
1044
1045         if (encrypt) {
1046                 iv_paddr = __pa(rctx->walk.iv);
1047                 list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
1048                                          entry) {
1049                         c->iv_paddr = iv_paddr;
1050                         err = __n2_crypt_chunk(tfm, c, qp, true);
1051                         if (err)
1052                                 break;
1053                         iv_paddr = c->dest_final - rctx->walk.blocksize;
1054                         list_del(&c->entry);
1055                         if (unlikely(c != &rctx->chunk))
1056                                 kfree(c);
1057                 }
1058                 final_iv_addr = __va(iv_paddr);
1059         } else {
1060                 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
1061                                                  entry) {
1062                         if (c == &rctx->chunk) {
1063                                 iv_paddr = __pa(rctx->walk.iv);
1064                         } else {
1065                                 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
1066                                             tmp->arr[tmp->arr_len-1].src_len -
1067                                             rctx->walk.blocksize);
1068                         }
1069                         if (!final_iv_addr) {
1070                                 unsigned long pa;
1071
1072                                 pa = (c->arr[c->arr_len-1].src_paddr +
1073                                       c->arr[c->arr_len-1].src_len -
1074                                       rctx->walk.blocksize);
1075                                 final_iv_addr = rctx->temp_iv;
1076                                 memcpy(rctx->temp_iv, __va(pa),
1077                                        rctx->walk.blocksize);
1078                         }
1079                         c->iv_paddr = iv_paddr;
1080                         err = __n2_crypt_chunk(tfm, c, qp, false);
1081                         if (err)
1082                                 break;
1083                         list_del(&c->entry);
1084                         if (unlikely(c != &rctx->chunk))
1085                                 kfree(c);
1086                 }
1087         }
1088         if (!err) {
1089                 hv_ret = wait_for_tail(qp);
1090                 if (hv_ret != HV_EOK)
1091                         err = -EINVAL;
1092         }
1093
1094         spin_unlock_irqrestore(&qp->lock, flags);
1095
1096 out:
1097         put_cpu();
1098
1099         n2_chunk_complete(req, err ? NULL : final_iv_addr);
1100         return err;
1101 }
1102
1103 static int n2_encrypt_chaining(struct ablkcipher_request *req)
1104 {
1105         return n2_do_chaining(req, true);
1106 }
1107
1108 static int n2_decrypt_chaining(struct ablkcipher_request *req)
1109 {
1110         return n2_do_chaining(req, false);
1111 }
1112
1113 struct n2_cipher_tmpl {
1114         const char              *name;
1115         const char              *drv_name;
1116         u8                      block_size;
1117         u8                      enc_type;
1118         struct ablkcipher_alg   ablkcipher;
1119 };
1120
1121 static const struct n2_cipher_tmpl cipher_tmpls[] = {
1122         /* ARC4: only ECB is supported (chaining bits ignored) */
1123         {       .name           = "ecb(arc4)",
1124                 .drv_name       = "ecb-arc4",
1125                 .block_size     = 1,
1126                 .enc_type       = (ENC_TYPE_ALG_RC4_STREAM |
1127                                    ENC_TYPE_CHAINING_ECB),
1128                 .ablkcipher     = {
1129                         .min_keysize    = 1,
1130                         .max_keysize    = 256,
1131                         .setkey         = n2_arc4_setkey,
1132                         .encrypt        = n2_encrypt_ecb,
1133                         .decrypt        = n2_decrypt_ecb,
1134                 },
1135         },
1136
1137         /* DES: ECB, CBC and CFB are supported */
1138         {       .name           = "ecb(des)",
1139                 .drv_name       = "ecb-des",
1140                 .block_size     = DES_BLOCK_SIZE,
1141                 .enc_type       = (ENC_TYPE_ALG_DES |
1142                                    ENC_TYPE_CHAINING_ECB),
1143                 .ablkcipher     = {
1144                         .min_keysize    = DES_KEY_SIZE,
1145                         .max_keysize    = DES_KEY_SIZE,
1146                         .setkey         = n2_des_setkey,
1147                         .encrypt        = n2_encrypt_ecb,
1148                         .decrypt        = n2_decrypt_ecb,
1149                 },
1150         },
1151         {       .name           = "cbc(des)",
1152                 .drv_name       = "cbc-des",
1153                 .block_size     = DES_BLOCK_SIZE,
1154                 .enc_type       = (ENC_TYPE_ALG_DES |
1155                                    ENC_TYPE_CHAINING_CBC),
1156                 .ablkcipher     = {
1157                         .ivsize         = DES_BLOCK_SIZE,
1158                         .min_keysize    = DES_KEY_SIZE,
1159                         .max_keysize    = DES_KEY_SIZE,
1160                         .setkey         = n2_des_setkey,
1161                         .encrypt        = n2_encrypt_chaining,
1162                         .decrypt        = n2_decrypt_chaining,
1163                 },
1164         },
1165         {       .name           = "cfb(des)",
1166                 .drv_name       = "cfb-des",
1167                 .block_size     = DES_BLOCK_SIZE,
1168                 .enc_type       = (ENC_TYPE_ALG_DES |
1169                                    ENC_TYPE_CHAINING_CFB),
1170                 .ablkcipher     = {
1171                         .min_keysize    = DES_KEY_SIZE,
1172                         .max_keysize    = DES_KEY_SIZE,
1173                         .setkey         = n2_des_setkey,
1174                         .encrypt        = n2_encrypt_chaining,
1175                         .decrypt        = n2_decrypt_chaining,
1176                 },
1177         },
1178
1179         /* 3DES: ECB, CBC and CFB are supported */
1180         {       .name           = "ecb(des3_ede)",
1181                 .drv_name       = "ecb-3des",
1182                 .block_size     = DES_BLOCK_SIZE,
1183                 .enc_type       = (ENC_TYPE_ALG_3DES |
1184                                    ENC_TYPE_CHAINING_ECB),
1185                 .ablkcipher     = {
1186                         .min_keysize    = 3 * DES_KEY_SIZE,
1187                         .max_keysize    = 3 * DES_KEY_SIZE,
1188                         .setkey         = n2_3des_setkey,
1189                         .encrypt        = n2_encrypt_ecb,
1190                         .decrypt        = n2_decrypt_ecb,
1191                 },
1192         },
1193         {       .name           = "cbc(des3_ede)",
1194                 .drv_name       = "cbc-3des",
1195                 .block_size     = DES_BLOCK_SIZE,
1196                 .enc_type       = (ENC_TYPE_ALG_3DES |
1197                                    ENC_TYPE_CHAINING_CBC),
1198                 .ablkcipher     = {
1199                         .ivsize         = DES_BLOCK_SIZE,
1200                         .min_keysize    = 3 * DES_KEY_SIZE,
1201                         .max_keysize    = 3 * DES_KEY_SIZE,
1202                         .setkey         = n2_3des_setkey,
1203                         .encrypt        = n2_encrypt_chaining,
1204                         .decrypt        = n2_decrypt_chaining,
1205                 },
1206         },
1207         {       .name           = "cfb(des3_ede)",
1208                 .drv_name       = "cfb-3des",
1209                 .block_size     = DES_BLOCK_SIZE,
1210                 .enc_type       = (ENC_TYPE_ALG_3DES |
1211                                    ENC_TYPE_CHAINING_CFB),
1212                 .ablkcipher     = {
1213                         .min_keysize    = 3 * DES_KEY_SIZE,
1214                         .max_keysize    = 3 * DES_KEY_SIZE,
1215                         .setkey         = n2_3des_setkey,
1216                         .encrypt        = n2_encrypt_chaining,
1217                         .decrypt        = n2_decrypt_chaining,
1218                 },
1219         },
1220         /* AES: ECB, CBC and CTR are supported */
1221         {       .name           = "ecb(aes)",
1222                 .drv_name       = "ecb-aes",
1223                 .block_size     = AES_BLOCK_SIZE,
1224                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1225                                    ENC_TYPE_CHAINING_ECB),
1226                 .ablkcipher     = {
1227                         .min_keysize    = AES_MIN_KEY_SIZE,
1228                         .max_keysize    = AES_MAX_KEY_SIZE,
1229                         .setkey         = n2_aes_setkey,
1230                         .encrypt        = n2_encrypt_ecb,
1231                         .decrypt        = n2_decrypt_ecb,
1232                 },
1233         },
1234         {       .name           = "cbc(aes)",
1235                 .drv_name       = "cbc-aes",
1236                 .block_size     = AES_BLOCK_SIZE,
1237                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1238                                    ENC_TYPE_CHAINING_CBC),
1239                 .ablkcipher     = {
1240                         .ivsize         = AES_BLOCK_SIZE,
1241                         .min_keysize    = AES_MIN_KEY_SIZE,
1242                         .max_keysize    = AES_MAX_KEY_SIZE,
1243                         .setkey         = n2_aes_setkey,
1244                         .encrypt        = n2_encrypt_chaining,
1245                         .decrypt        = n2_decrypt_chaining,
1246                 },
1247         },
1248         {       .name           = "ctr(aes)",
1249                 .drv_name       = "ctr-aes",
1250                 .block_size     = AES_BLOCK_SIZE,
1251                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1252                                    ENC_TYPE_CHAINING_COUNTER),
1253                 .ablkcipher     = {
1254                         .ivsize         = AES_BLOCK_SIZE,
1255                         .min_keysize    = AES_MIN_KEY_SIZE,
1256                         .max_keysize    = AES_MAX_KEY_SIZE,
1257                         .setkey         = n2_aes_setkey,
1258                         .encrypt        = n2_encrypt_chaining,
1259                         .decrypt        = n2_encrypt_chaining,
1260                 },
1261         },
1262
1263 };
1264 #define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
1265
1266 static LIST_HEAD(cipher_algs);
1267
1268 struct n2_hash_tmpl {
1269         const char      *name;
1270         const char      *hash_zero;
1271         const u32       *hash_init;
1272         u8              hw_op_hashsz;
1273         u8              digest_size;
1274         u8              block_size;
1275         u8              auth_type;
1276         u8              hmac_type;
1277 };
1278
1279 static const char md5_zero[MD5_DIGEST_SIZE] = {
1280         0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
1281         0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
1282 };
1283 static const u32 md5_init[MD5_HASH_WORDS] = {
1284         cpu_to_le32(MD5_H0),
1285         cpu_to_le32(MD5_H1),
1286         cpu_to_le32(MD5_H2),
1287         cpu_to_le32(MD5_H3),
1288 };
1289 static const char sha1_zero[SHA1_DIGEST_SIZE] = {
1290         0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
1291         0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
1292         0x07, 0x09
1293 };
1294 static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
1295         SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
1296 };
1297 static const char sha256_zero[SHA256_DIGEST_SIZE] = {
1298         0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
1299         0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
1300         0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
1301         0x1b, 0x78, 0x52, 0xb8, 0x55
1302 };
1303 static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
1304         SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
1305         SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
1306 };
1307 static const char sha224_zero[SHA224_DIGEST_SIZE] = {
1308         0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
1309         0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
1310         0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
1311         0x2f
1312 };
1313 static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
1314         SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
1315         SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
1316 };
1317
1318 static const struct n2_hash_tmpl hash_tmpls[] = {
1319         { .name         = "md5",
1320           .hash_zero    = md5_zero,
1321           .hash_init    = md5_init,
1322           .auth_type    = AUTH_TYPE_MD5,
1323           .hmac_type    = AUTH_TYPE_HMAC_MD5,
1324           .hw_op_hashsz = MD5_DIGEST_SIZE,
1325           .digest_size  = MD5_DIGEST_SIZE,
1326           .block_size   = MD5_HMAC_BLOCK_SIZE },
1327         { .name         = "sha1",
1328           .hash_zero    = sha1_zero,
1329           .hash_init    = sha1_init,
1330           .auth_type    = AUTH_TYPE_SHA1,
1331           .hmac_type    = AUTH_TYPE_HMAC_SHA1,
1332           .hw_op_hashsz = SHA1_DIGEST_SIZE,
1333           .digest_size  = SHA1_DIGEST_SIZE,
1334           .block_size   = SHA1_BLOCK_SIZE },
1335         { .name         = "sha256",
1336           .hash_zero    = sha256_zero,
1337           .hash_init    = sha256_init,
1338           .auth_type    = AUTH_TYPE_SHA256,
1339           .hmac_type    = AUTH_TYPE_HMAC_SHA256,
1340           .hw_op_hashsz = SHA256_DIGEST_SIZE,
1341           .digest_size  = SHA256_DIGEST_SIZE,
1342           .block_size   = SHA256_BLOCK_SIZE },
1343         { .name         = "sha224",
1344           .hash_zero    = sha224_zero,
1345           .hash_init    = sha224_init,
1346           .auth_type    = AUTH_TYPE_SHA256,
1347           .hmac_type    = AUTH_TYPE_RESERVED,
1348           .hw_op_hashsz = SHA256_DIGEST_SIZE,
1349           .digest_size  = SHA224_DIGEST_SIZE,
1350           .block_size   = SHA224_BLOCK_SIZE },
1351 };
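
/* sha224 runs on the SHA-256 engine: hw_op_hashsz is the full SHA-256
 * state size and only the first SHA224_DIGEST_SIZE bytes of the result
 * are copied out.  Its hmac_type of AUTH_TYPE_RESERVED makes
 * __n2_register_one_ahash() skip registering an hmac(sha224) variant.
 */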
1352 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1353
1354 static LIST_HEAD(ahash_algs);
1355 static LIST_HEAD(hmac_algs);
1356
1357 static int algs_registered;
1358
1359 static void __n2_unregister_algs(void)
1360 {
1361         struct n2_cipher_alg *cipher, *cipher_tmp;
1362         struct n2_ahash_alg *alg, *alg_tmp;
1363         struct n2_hmac_alg *hmac, *hmac_tmp;
1364
1365         list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
1366                 crypto_unregister_alg(&cipher->alg);
1367                 list_del(&cipher->entry);
1368                 kfree(cipher);
1369         }
1370         list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
1371                 crypto_unregister_ahash(&hmac->derived.alg);
1372                 list_del(&hmac->derived.entry);
1373                 kfree(hmac);
1374         }
1375         list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1376                 crypto_unregister_ahash(&alg->alg);
1377                 list_del(&alg->entry);
1378                 kfree(alg);
1379         }
1380 }
1381
1382 static int n2_cipher_cra_init(struct crypto_tfm *tfm)
1383 {
1384         tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
1385         return 0;
1386 }
1387
1388 static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
1389 {
1390         struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1391         struct crypto_alg *alg;
1392         int err;
1393
1394         if (!p)
1395                 return -ENOMEM;
1396
1397         alg = &p->alg;
1398
1399         snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1400         snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1401         alg->cra_priority = N2_CRA_PRIORITY;
1402         alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1403                          CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
1404         alg->cra_blocksize = tmpl->block_size;
1405         p->enc_type = tmpl->enc_type;
1406         alg->cra_ctxsize = sizeof(struct n2_cipher_context);
1407         alg->cra_type = &crypto_ablkcipher_type;
1408         alg->cra_u.ablkcipher = tmpl->ablkcipher;
1409         alg->cra_init = n2_cipher_cra_init;
1410         alg->cra_module = THIS_MODULE;
1411
1412         list_add(&p->entry, &cipher_algs);
1413         err = crypto_register_alg(alg);
1414         if (err) {
1415                 pr_err("%s alg registration failed\n", alg->cra_name);
1416                 list_del(&p->entry);
1417                 kfree(p);
1418         } else {
1419                 pr_info("%s alg registered\n", alg->cra_name);
1420         }
1421         return err;
1422 }
1423
1424 static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
1425 {
1426         struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1427         struct ahash_alg *ahash;
1428         struct crypto_alg *base;
1429         int err;
1430
1431         if (!p)
1432                 return -ENOMEM;
1433
1434         p->child_alg = n2ahash->alg.halg.base.cra_name;
1435         memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
1436         INIT_LIST_HEAD(&p->derived.entry);
1437
1438         ahash = &p->derived.alg;
1439         ahash->digest = n2_hmac_async_digest;
1440         ahash->setkey = n2_hmac_async_setkey;
1441
1442         base = &ahash->halg.base;
1443         snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
1444         snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
1445
1446         base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
1447         base->cra_init = n2_hmac_cra_init;
1448         base->cra_exit = n2_hmac_cra_exit;
1449
1450         list_add(&p->derived.entry, &hmac_algs);
1451         err = crypto_register_ahash(ahash);
1452         if (err) {
1453                 pr_err("%s alg registration failed\n", base->cra_name);
1454                 list_del(&p->derived.entry);
1455                 kfree(p);
1456         } else {
1457                 pr_info("%s alg registered\n", base->cra_name);
1458         }
1459         return err;
1460 }
1461
1462 static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1463 {
1464         struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1465         struct hash_alg_common *halg;
1466         struct crypto_alg *base;
1467         struct ahash_alg *ahash;
1468         int err;
1469
1470         if (!p)
1471                 return -ENOMEM;
1472
1473         p->hash_zero = tmpl->hash_zero;
1474         p->hash_init = tmpl->hash_init;
1475         p->auth_type = tmpl->auth_type;
1476         p->hmac_type = tmpl->hmac_type;
1477         p->hw_op_hashsz = tmpl->hw_op_hashsz;
1478         p->digest_size = tmpl->digest_size;
1479
1480         ahash = &p->alg;
1481         ahash->init = n2_hash_async_init;
1482         ahash->update = n2_hash_async_update;
1483         ahash->final = n2_hash_async_final;
1484         ahash->finup = n2_hash_async_finup;
1485         ahash->digest = n2_hash_async_digest;
1486
1487         halg = &ahash->halg;
1488         halg->digestsize = tmpl->digest_size;
1489
1490         base = &halg->base;
1491         snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1492         snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1493         base->cra_priority = N2_CRA_PRIORITY;
1494         base->cra_flags = CRYPTO_ALG_TYPE_AHASH |
1495                           CRYPTO_ALG_KERN_DRIVER_ONLY |
1496                           CRYPTO_ALG_NEED_FALLBACK;
1497         base->cra_blocksize = tmpl->block_size;
1498         base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1499         base->cra_module = THIS_MODULE;
1500         base->cra_init = n2_hash_cra_init;
1501         base->cra_exit = n2_hash_cra_exit;
1502
1503         list_add(&p->entry, &ahash_algs);
1504         err = crypto_register_ahash(ahash);
1505         if (err) {
1506                 pr_err("%s alg registration failed\n", base->cra_name);
1507                 list_del(&p->entry);
1508                 kfree(p);
1509         } else {
1510                 pr_info("%s alg registered\n", base->cra_name);
1511         }
1512         if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
1513                 err = __n2_register_one_hmac(p);
1514         return err;
1515 }
1516
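/* Register the hash and cipher algorithms exactly once.  Registration
 * is reference counted under spu_lock, so only the first caller does
 * the actual work; any failure unwinds everything registered so far
 * before returning the error.
 */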
1517 static int n2_register_algs(void)
1518 {
1519         int i, err = 0;
1520
1521         mutex_lock(&spu_lock);
1522         if (algs_registered++)
1523                 goto out;
1524
1525         for (i = 0; i < NUM_HASH_TMPLS; i++) {
1526                 err = __n2_register_one_ahash(&hash_tmpls[i]);
1527                 if (err) {
1528                         __n2_unregister_algs();
1529                         goto out;
1530                 }
1531         }
1532         for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1533                 err = __n2_register_one_cipher(&cipher_tmpls[i]);
1534                 if (err) {
1535                         __n2_unregister_algs();
1536                         goto out;
1537                 }
1538         }
1539
1540 out:
1541         mutex_unlock(&spu_lock);
1542         return err;
1543 }
1544
1545 static void n2_unregister_algs(void)
1546 {
1547         mutex_lock(&spu_lock);
1548         if (!--algs_registered)
1549                 __n2_unregister_algs();
1550         mutex_unlock(&spu_lock);
1551 }
1552
1553 /* To map CWQ queues to interrupt sources, the hypervisor API provides
1554  * a devino.  This isn't very useful to us because all of the
1555  * interrupts listed in the device_node have been translated to
1556  * Linux virtual IRQ cookie numbers.
1557  *
1558  * So we have to back-translate, going through the 'intr' and 'ino'
1559  * property tables of the n2cp MDESC node, matching it with the OF
1560  * 'interrupts' property entries, in order to figure out which
1561  * devino goes to which already-translated IRQ.
1562  */
1563 static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
1564                              unsigned long dev_ino)
1565 {
1566         const unsigned int *dev_intrs;
1567         unsigned int intr;
1568         int i;
1569
1570         for (i = 0; i < ip->num_intrs; i++) {
1571                 if (ip->ino_table[i].ino == dev_ino)
1572                         break;
1573         }
1574         if (i == ip->num_intrs)
1575                 return -ENODEV;
1576
1577         intr = ip->ino_table[i].intr;
1578
1579         dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1580         if (!dev_intrs)
1581                 return -ENODEV;
1582
1583         for (i = 0; i < dev->archdata.num_irqs; i++) {
1584                 if (dev_intrs[i] == intr)
1585                         return i;
1586         }
1587
1588         return -ENODEV;
1589 }
1590
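/* Hook up the interrupt for one SPU queue: ask the hypervisor for the
 * devino belonging to the queue handle, back-translate that devino to
 * an index into the device's interrupt list, then request the
 * corresponding Linux IRQ.
 */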
1591 static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
1592                        const char *irq_name, struct spu_queue *p,
1593                        irq_handler_t handler)
1594 {
1595         unsigned long herr;
1596         int index;
1597
1598         herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1599         if (herr)
1600                 return -EINVAL;
1601
1602         index = find_devino_index(dev, ip, p->devino);
1603         if (index < 0)
1604                 return index;
1605
1606         p->irq = dev->archdata.irqs[index];
1607
1608         snprintf(p->irq_name, sizeof(p->irq_name), "%s-%d", irq_name, index);
1609
1610         return request_irq(p->irq, handler, 0, p->irq_name, p);
1611 }
1612
1613 static struct kmem_cache *queue_cache[2];
1614
1615 static void *new_queue(unsigned long q_type)
1616 {
1617         return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1618 }
1619
1620 static void free_queue(void *p, unsigned long q_type)
1621 {
1622         kmem_cache_free(queue_cache[q_type - 1], p);
1623 }
1624
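/* Each queue type gets its own kmem_cache whose object size is a full
 * hardware queue (entry size times number of entries), aligned to the
 * entry size.  Create both caches; if the CWQ cache cannot be created,
 * the MAU cache is torn down again.
 */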
1625 static int queue_cache_init(void)
1626 {
1627         if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1628                 queue_cache[HV_NCS_QTYPE_MAU - 1] =
1629                         kmem_cache_create("mau_queue",
1630                                           (MAU_NUM_ENTRIES *
1631                                            MAU_ENTRY_SIZE),
1632                                           MAU_ENTRY_SIZE, 0, NULL);
1633         if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1634                 return -ENOMEM;
1635
1636         if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1637                 queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1638                         kmem_cache_create("cwq_queue",
1639                                           (CWQ_NUM_ENTRIES *
1640                                            CWQ_ENTRY_SIZE),
1641                                           CWQ_ENTRY_SIZE, 0, NULL);
1642         if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1643                 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1644                 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1645                 return -ENOMEM;
1646         }
1647         return 0;
1648 }
1649
1650 static void queue_cache_destroy(void)
1651 {
1652         kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1653         kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1654         queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1655         queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
1656 }
1657
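/* Configure a queue with the hypervisor.  The qconf call is issued
 * while temporarily bound to the CPUs that share this SPU, presumably
 * because the hypervisor expects the call to originate from a CPU
 * attached to the unit; the caller's original affinity is restored
 * afterwards.
 */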
1658 static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1659 {
1660         cpumask_var_t old_allowed;
1661         unsigned long hv_ret;
1662
1663         if (cpumask_empty(&p->sharing))
1664                 return -EINVAL;
1665
1666         if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
1667                 return -ENOMEM;
1668
1669         cpumask_copy(old_allowed, &current->cpus_allowed);
1670
1671         set_cpus_allowed_ptr(current, &p->sharing);
1672
1673         hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1674                                  CWQ_NUM_ENTRIES, &p->qhandle);
1675         if (!hv_ret)
1676                 sun4v_ncs_sethead_marker(p->qhandle, 0);
1677
1678         set_cpus_allowed_ptr(current, old_allowed);
1679
1680         free_cpumask_var(old_allowed);
1681
1682         return (hv_ret ? -EINVAL : 0);
1683 }
1684
1685 static int spu_queue_setup(struct spu_queue *p)
1686 {
1687         int err;
1688
1689         p->q = new_queue(p->q_type);
1690         if (!p->q)
1691                 return -ENOMEM;
1692
1693         err = spu_queue_register(p, p->q_type);
1694         if (err) {
1695                 free_queue(p->q, p->q_type);
1696                 p->q = NULL;
1697         }
1698
1699         return err;
1700 }
1701
1702 static void spu_queue_destroy(struct spu_queue *p)
1703 {
1704         unsigned long hv_ret;
1705
1706         if (!p->q)
1707                 return;
1708
1709         hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1710
1711         if (!hv_ret)
1712                 free_queue(p->q, p->q_type);
1713 }
1714
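/* Tear down every queue on a list: drop any cpu_to_cwq references to
 * it, release its IRQ, unconfigure the queue with the hypervisor, free
 * the queue memory and finally the spu_queue structure itself.
 */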
1715 static void spu_list_destroy(struct list_head *list)
1716 {
1717         struct spu_queue *p, *n;
1718
1719         list_for_each_entry_safe(p, n, list, list) {
1720                 int i;
1721
1722                 for (i = 0; i < NR_CPUS; i++) {
1723                         if (cpu_to_cwq[i] == p)
1724                                 cpu_to_cwq[i] = NULL;
1725                 }
1726
1727                 if (p->irq) {
1728                         free_irq(p->irq, p);
1729                         p->irq = 0;
1730                 }
1731                 spu_queue_destroy(p);
1732                 list_del(&p->list);
1733                 kfree(p);
1734         }
1735 }
1736
1737 /* Walk the backward arcs of a CWQ 'exec-unit' node,
1738  * gathering cpu membership information.
1739  */
1740 static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1741                                struct platform_device *dev,
1742                                u64 node, struct spu_queue *p,
1743                                struct spu_queue **table)
1744 {
1745         u64 arc;
1746
1747         mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1748                 u64 tgt = mdesc_arc_target(mdesc, arc);
1749                 const char *name = mdesc_node_name(mdesc, tgt);
1750                 const u64 *id;
1751
1752                 if (strcmp(name, "cpu"))
1753                         continue;
1754                 id = mdesc_get_property(mdesc, tgt, "id", NULL);
1755                 if (table[*id] != NULL) {
1756                         dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
1757                                 dev->dev.of_node->full_name);
1758                         return -EINVAL;
1759                 }
1760                 cpumask_set_cpu(*id, &p->sharing);
1761                 table[*id] = p;
1762         }
1763         return 0;
1764 }
1765
1766 /* Process an 'exec-unit' MDESC node of type 'cwq'.  */
1767 static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1768                             struct platform_device *dev, struct mdesc_handle *mdesc,
1769                             u64 node, const char *iname, unsigned long q_type,
1770                             irq_handler_t handler, struct spu_queue **table)
1771 {
1772         struct spu_queue *p;
1773         int err;
1774
1775         p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1776         if (!p) {
1777                 dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
1778                         dev->dev.of_node->full_name);
1779                 return -ENOMEM;
1780         }
1781
1782         cpumask_clear(&p->sharing);
1783         spin_lock_init(&p->lock);
1784         p->q_type = q_type;
1785         INIT_LIST_HEAD(&p->jobs);
1786         list_add(&p->list, list);
1787
1788         err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1789         if (err)
1790                 return err;
1791
1792         err = spu_queue_setup(p);
1793         if (err)
1794                 return err;
1795
1796         return spu_map_ino(dev, ip, iname, p, handler);
1797 }
1798
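/* Walk the MDESC looking for 'exec-unit' nodes whose "type" property
 * matches exec_name ("cwq" or "mau") and create a queue for each one
 * found.  On any failure all queues created so far are destroyed.
 */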
1799 static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
1800                           struct spu_mdesc_info *ip, struct list_head *list,
1801                           const char *exec_name, unsigned long q_type,
1802                           irq_handler_t handler, struct spu_queue **table)
1803 {
1804         int err = 0;
1805         u64 node;
1806
1807         mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1808                 const char *type;
1809
1810                 type = mdesc_get_property(mdesc, node, "type", NULL);
1811                 if (!type || strcmp(type, exec_name))
1812                         continue;
1813
1814                 err = handle_exec_unit(ip, list, dev, mdesc, node,
1815                                        exec_name, q_type, handler, table);
1816                 if (err) {
1817                         spu_list_destroy(list);
1818                         break;
1819                 }
1820         }
1821
1822         return err;
1823 }
1824
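/* Build ip->ino_table from the MDESC node's 'ino' property.  Each entry
 * pairs a devino with an interrupt index; indices are assigned
 * sequentially starting at 1 and are later matched against the OF
 * 'interrupts' property by find_devino_index().
 */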
1825 static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
1826                          struct spu_mdesc_info *ip)
1827 {
1828         const u64 *ino;
1829         int ino_len;
1830         int i;
1831
1832         ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1833         if (!ino) {
1834                 pr_err("NO 'ino'\n");
1835                 return -ENODEV;
1836         }
1837
1838         ip->num_intrs = ino_len / sizeof(u64);
1839         ip->ino_table = kcalloc(ip->num_intrs,
1840                                 sizeof(struct ino_blob),
1841                                 GFP_KERNEL);
1842         if (!ip->ino_table)
1843                 return -ENOMEM;
1844
1845         for (i = 0; i < ip->num_intrs; i++) {
1846                 struct ino_blob *b = &ip->ino_table[i];
1847                 b->intr = i + 1;
1848                 b->ino = ino[i];
1849         }
1850
1851         return 0;
1852 }
1853
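/* Find the MDESC 'virtual-device' node that corresponds to this
 * platform device by matching the node name and comparing its
 * 'cfg-handle' against the device's OF 'reg' property, then extract the
 * interrupt properties from that node.
 */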
1854 static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1855                                 struct platform_device *dev,
1856                                 struct spu_mdesc_info *ip,
1857                                 const char *node_name)
1858 {
1859         const unsigned int *reg;
1860         u64 node;
1861
1862         reg = of_get_property(dev->dev.of_node, "reg", NULL);
1863         if (!reg)
1864                 return -ENODEV;
1865
1866         mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1867                 const char *name;
1868                 const u64 *chdl;
1869
1870                 name = mdesc_get_property(mdesc, node, "name", NULL);
1871                 if (!name || strcmp(name, node_name))
1872                         continue;
1873                 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1874                 if (!chdl || (*chdl != *reg))
1875                         continue;
1876                 ip->cfg_handle = *chdl;
1877                 return get_irq_props(mdesc, node, ip);
1878         }
1879
1880         return -ENODEV;
1881 }
1882
1883 static unsigned long n2_spu_hvapi_major;
1884 static unsigned long n2_spu_hvapi_minor;
1885
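/* Negotiate the NCS hypervisor API group: request major version 2 and
 * record whatever minor version the hypervisor reports back.
 */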
1886 static int n2_spu_hvapi_register(void)
1887 {
1888         int err;
1889
1890         n2_spu_hvapi_major = 2;
1891         n2_spu_hvapi_minor = 0;
1892
1893         err = sun4v_hvapi_register(HV_GRP_NCS,
1894                                    n2_spu_hvapi_major,
1895                                    &n2_spu_hvapi_minor);
1896
1897         if (!err)
1898                 pr_info("Registered NCS HVAPI version %lu.%lu\n",
1899                         n2_spu_hvapi_major,
1900                         n2_spu_hvapi_minor);
1901
1902         return err;
1903 }
1904
1905 static void n2_spu_hvapi_unregister(void)
1906 {
1907         sun4v_hvapi_unregister(HV_GRP_NCS);
1908 }
1909
1910 static int global_ref;
1911
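/* Reference-counted acquisition of the resources shared by all n2cp and
 * ncp instances: the NCS hypervisor API registration, the queue
 * kmem_caches and the per-cpu queue lookup tables.  Only the first
 * caller allocates anything; an error drops the reference again.
 */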
1912 static int grab_global_resources(void)
1913 {
1914         int err = 0;
1915
1916         mutex_lock(&spu_lock);
1917
1918         if (global_ref++)
1919                 goto out;
1920
1921         err = n2_spu_hvapi_register();
1922         if (err)
1923                 goto out;
1924
1925         err = queue_cache_init();
1926         if (err)
1927                 goto out_hvapi_release;
1928
1929         err = -ENOMEM;
1930         cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1931                              GFP_KERNEL);
1932         if (!cpu_to_cwq)
1933                 goto out_queue_cache_destroy;
1934
1935         cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1936                              GFP_KERNEL);
1937         if (!cpu_to_mau)
1938                 goto out_free_cwq_table;
1939
1940         err = 0;
1941
1942 out:
1943         if (err)
1944                 global_ref--;
1945         mutex_unlock(&spu_lock);
1946         return err;
1947
1948 out_free_cwq_table:
1949         kfree(cpu_to_cwq);
1950         cpu_to_cwq = NULL;
1951
1952 out_queue_cache_destroy:
1953         queue_cache_destroy();
1954
1955 out_hvapi_release:
1956         n2_spu_hvapi_unregister();
1957         goto out;
1958 }
1959
1960 static void release_global_resources(void)
1961 {
1962         mutex_lock(&spu_lock);
1963         if (!--global_ref) {
1964                 kfree(cpu_to_cwq);
1965                 cpu_to_cwq = NULL;
1966
1967                 kfree(cpu_to_mau);
1968                 cpu_to_mau = NULL;
1969
1970                 queue_cache_destroy();
1971                 n2_spu_hvapi_unregister();
1972         }
1973         mutex_unlock(&spu_lock);
1974 }
1975
1976 static struct n2_crypto *alloc_n2cp(void)
1977 {
1978         struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1979
1980         if (np)
1981                 INIT_LIST_HEAD(&np->cwq_list);
1982
1983         return np;
1984 }
1985
1986 static void free_n2cp(struct n2_crypto *np)
1987 {
1988         if (np->cwq_info.ino_table) {
1989                 kfree(np->cwq_info.ino_table);
1990                 np->cwq_info.ino_table = NULL;
1991         }
1992
1993         kfree(np);
1994 }
1995
1996 static void n2_spu_driver_version(void)
1997 {
1998         static int n2_spu_version_printed;
1999
2000         if (n2_spu_version_printed++ == 0)
2001                 pr_info("%s", version);
2002 }
2003
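/* Probe one n2cp (control word queue) device: grab the shared global
 * resources, read the interrupt layout of the "n2cp" node from the
 * machine description, create a CWQ for every matching 'exec-unit', and
 * finally register the crypto algorithms served by those queues.
 */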
2004 static int n2_crypto_probe(struct platform_device *dev)
2005 {
2006         struct mdesc_handle *mdesc;
2007         const char *full_name;
2008         struct n2_crypto *np;
2009         int err;
2010
2011         n2_spu_driver_version();
2012
2013         full_name = dev->dev.of_node->full_name;
2014         pr_info("Found N2CP at %s\n", full_name);
2015
2016         np = alloc_n2cp();
2017         if (!np) {
2018                 dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
2019                         full_name);
2020                 return -ENOMEM;
2021         }
2022
2023         err = grab_global_resources();
2024         if (err) {
2025                 dev_err(&dev->dev, "%s: Unable to grab global resources.\n",
2026                         full_name);
2027                 goto out_free_n2cp;
2028         }
2029
2030         mdesc = mdesc_grab();
2031
2032         if (!mdesc) {
2033                 dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
2034                         full_name);
2035                 err = -ENODEV;
2036                 goto out_free_global;
2037         }
2038         err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
2039         if (err) {
2040                 dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
2041                         full_name);
2042                 mdesc_release(mdesc);
2043                 goto out_free_global;
2044         }
2045
2046         err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
2047                              "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
2048                              cpu_to_cwq);
2049         mdesc_release(mdesc);
2050
2051         if (err) {
2052                 dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
2053                         full_name);
2054                 goto out_free_global;
2055         }
2056
2057         err = n2_register_algs();
2058         if (err) {
2059                 dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
2060                         full_name);
2061                 goto out_free_spu_list;
2062         }
2063
2064         dev_set_drvdata(&dev->dev, np);
2065
2066         return 0;
2067
2068 out_free_spu_list:
2069         spu_list_destroy(&np->cwq_list);
2070
2071 out_free_global:
2072         release_global_resources();
2073
2074 out_free_n2cp:
2075         free_n2cp(np);
2076
2077         return err;
2078 }
2079
2080 static int n2_crypto_remove(struct platform_device *dev)
2081 {
2082         struct n2_crypto *np = dev_get_drvdata(&dev->dev);
2083
2084         n2_unregister_algs();
2085
2086         spu_list_destroy(&np->cwq_list);
2087
2088         release_global_resources();
2089
2090         free_n2cp(np);
2091
2092         return 0;
2093 }
2094
2095 static struct n2_mau *alloc_ncp(void)
2096 {
2097         struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
2098
2099         if (mp)
2100                 INIT_LIST_HEAD(&mp->mau_list);
2101
2102         return mp;
2103 }
2104
2105 static void free_ncp(struct n2_mau *mp)
2106 {
2107         if (mp->mau_info.ino_table) {
2108                 kfree(mp->mau_info.ino_table);
2109                 mp->mau_info.ino_table = NULL;
2110         }
2111
2112         kfree(mp);
2113 }
2114
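/* Probe one ncp (MAU) device.  The flow mirrors n2_crypto_probe() but
 * scans for "mau" exec-units and registers no crypto algorithms; the
 * MAU queues are only set up and recorded in cpu_to_mau.
 */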
2115 static int n2_mau_probe(struct platform_device *dev)
2116 {
2117         struct mdesc_handle *mdesc;
2118         const char *full_name;
2119         struct n2_mau *mp;
2120         int err;
2121
2122         n2_spu_driver_version();
2123
2124         full_name = dev->dev.of_node->full_name;
2125         pr_info("Found NCP at %s\n", full_name);
2126
2127         mp = alloc_ncp();
2128         if (!mp) {
2129                 dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
2130                         full_name);
2131                 return -ENOMEM;
2132         }
2133
2134         err = grab_global_resources();
2135         if (err) {
2136                 dev_err(&dev->dev, "%s: Unable to grab global resources.\n",
2137                         full_name);
2138                 goto out_free_ncp;
2139         }
2140
2141         mdesc = mdesc_grab();
2142
2143         if (!mdesc) {
2144                 dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
2145                         full_name);
2146                 err = -ENODEV;
2147                 goto out_free_global;
2148         }
2149
2150         err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
2151         if (err) {
2152                 dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
2153                         full_name);
2154                 mdesc_release(mdesc);
2155                 goto out_free_global;
2156         }
2157
2158         err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
2159                              "mau", HV_NCS_QTYPE_MAU, mau_intr,
2160                              cpu_to_mau);
2161         mdesc_release(mdesc);
2162
2163         if (err) {
2164                 dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
2165                         full_name);
2166                 goto out_free_global;
2167         }
2168
2169         dev_set_drvdata(&dev->dev, mp);
2170
2171         return 0;
2172
2173 out_free_global:
2174         release_global_resources();
2175
2176 out_free_ncp:
2177         free_ncp(mp);
2178
2179         return err;
2180 }
2181
2182 static int n2_mau_remove(struct platform_device *dev)
2183 {
2184         struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2185
2186         spu_list_destroy(&mp->mau_list);
2187
2188         release_global_resources();
2189
2190         free_ncp(mp);
2191
2192         return 0;
2193 }
2194
2195 static const struct of_device_id n2_crypto_match[] = {
2196         {
2197                 .name = "n2cp",
2198                 .compatible = "SUNW,n2-cwq",
2199         },
2200         {
2201                 .name = "n2cp",
2202                 .compatible = "SUNW,vf-cwq",
2203         },
2204         {
2205                 .name = "n2cp",
2206                 .compatible = "SUNW,kt-cwq",
2207         },
2208         {},
2209 };
2210
2211 MODULE_DEVICE_TABLE(of, n2_crypto_match);
2212
2213 static struct platform_driver n2_crypto_driver = {
2214         .driver = {
2215                 .name           =       "n2cp",
2216                 .of_match_table =       n2_crypto_match,
2217         },
2218         .probe          =       n2_crypto_probe,
2219         .remove         =       n2_crypto_remove,
2220 };
2221
2222 static const struct of_device_id n2_mau_match[] = {
2223         {
2224                 .name = "ncp",
2225                 .compatible = "SUNW,n2-mau",
2226         },
2227         {
2228                 .name = "ncp",
2229                 .compatible = "SUNW,vf-mau",
2230         },
2231         {
2232                 .name = "ncp",
2233                 .compatible = "SUNW,kt-mau",
2234         },
2235         {},
2236 };
2237
2238 MODULE_DEVICE_TABLE(of, n2_mau_match);
2239
2240 static struct platform_driver n2_mau_driver = {
2241         .driver = {
2242                 .name           =       "ncp",
2243                 .of_match_table =       n2_mau_match,
2244         },
2245         .probe          =       n2_mau_probe,
2246         .remove         =       n2_mau_remove,
2247 };
2248
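/* Register both platform drivers; if the second registration fails the
 * first is unwound, so the module comes up all-or-nothing.
 */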
2249 static int __init n2_init(void)
2250 {
2251         int err = platform_driver_register(&n2_crypto_driver);
2252
2253         if (!err) {
2254                 err = platform_driver_register(&n2_mau_driver);
2255                 if (err)
2256                         platform_driver_unregister(&n2_crypto_driver);
2257         }
2258         return err;
2259 }
2260
2261 static void __exit n2_exit(void)
2262 {
2263         platform_driver_unregister(&n2_mau_driver);
2264         platform_driver_unregister(&n2_crypto_driver);
2265 }
2266
2267 module_init(n2_init);
2268 module_exit(n2_exit);