/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}
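
/* TLS 1.3 appends one content-type byte after the plaintext and then
 * zero-padding, all of it encrypted (RFC 8446). padding_length() scans
 * backward from the end of the decrypted record: the first non-zero byte
 * is the real content type, and the zeros behind it are padding whose
 * length is returned so the caller can shrink the payload.
 */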
static int padding_length(struct tls_sw_context_rx *ctx,
			  struct tls_prot_info *prot, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		char content_type = 0;
		int err;
		int back = 17;

		while (content_type == 0) {
			if (back > rxm->full_len - prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb,
					    rxm->offset + rxm->full_len - back,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			back++;
		}
		ctx->control = content_type;
	}
	return sub;
}
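
/* Completion callback for asynchronous AEAD decryption. It runs in the
 * crypto driver's context: errors are propagated to the socket, padding
 * and the record header are stripped from the strparser message, any
 * zero-copy destination pages are released, and a reader waiting in
 * tls_sw_recvmsg() is woken once the last pending decrypt completes.
 */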
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct scatterlist *sgout = aead_req->dst;
	struct scatterlist *sgin = aead_req->src;
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	prot = &tls_ctx->prot_info;

	/* Propagate if there was an err */
	if (err) {
		ctx->async_wait.err = err;
		tls_err_abort(skb->sk, err);
	} else {
		struct strp_msg *rxm = strp_msg(skb);
		int pad;

		pad = padding_length(ctx, prot, skb);
		if (pad < 0) {
			ctx->async_wait.err = pad;
			tls_err_abort(skb->sk, pad);
		} else {
			rxm->full_len -= pad;
			rxm->offset += prot->prepend_size;
			rxm->full_len -= prot->overhead_size;
		}
	}

	/* After using skb->sk to propagate sk through crypto async callback
	 * we need to NULL it again.
	 */
	skb->sk = NULL;

	/* Free the destination pages if skb was not decrypted in place */
	if (sgout != sgin) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	spin_lock_bh(&ctx->decrypt_compl_lock);
	pending = atomic_dec_return(&ctx->decrypt_pending);

	if (!pending && ctx->async_notify)
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->decrypt_compl_lock);
}
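
/* Submit one AEAD decrypt request covering AAD || ciphertext || tag.
 * In async mode the request completes via tls_decrypt_done() and
 * -EINPROGRESS is returned to the caller; in sync mode we wait for the
 * crypto layer right here.
 */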
static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (async) {
		/* Using skb->sk to push sk through to crypto async callback
		 * handler. This allows propagating errors up to the socket
		 * if needed. It _must_ be cleared in the async handler
		 * before consume_skb is called. We _know_ skb->sk is NULL
		 * because it is a clone from strparser.
		 */
		skb->sk = sk;
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, skb);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (async)
			return ret;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}

	if (async)
		atomic_dec(&ctx->decrypt_pending);

	return ret;
}
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has enough required room (ensured by caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}
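
/* Allocate a fresh TLS record. The AEAD request lives at the tail of the
 * allocation (crypto_aead_reqsize() extra bytes). Both two-entry
 * scatterlists start with aad_space and are later chained to the
 * plaintext/ciphertext sk_msg data, which is why entry 1 is left
 * unmarked here.
 */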
static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}
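
/* Push records from tx_list to the transport in order. A partially
 * transmitted record at the head is finished first; afterwards only
 * records whose encryption has completed (tx_ready) are sent, so an
 * in-flight async record stops the loop and record ordering is
 * preserved.
 */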
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	return rc;
}
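
/* Completion callback for asynchronous AEAD encryption. It restores the
 * first ciphertext scatterlist entry to cover the record header again,
 * marks the record ready, and kicks the TX work if the record sits at
 * the head of tx_list.
 */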
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct sock *sk = req->data;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);
	msg_en = &rec->msg_encrypted;

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	spin_lock_bh(&ctx->encrypt_compl_lock);
	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && ctx->async_notify)
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->encrypt_compl_lock);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}
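
/* Build the per-record nonce (salt/implicit IV XORed with the record
 * sequence number, per the TLS AEAD construction) and submit the encrypt
 * request. The record is queued on tx_list before the request is started
 * so that the async completion always finds it there.
 */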
static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, sk);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}
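
/* Split the open record at split_point so that only apply_bytes worth of
 * plaintext is encrypted now; the remainder moves to a newly allocated
 * record (*to). A scatterlist entry straddling the split point is divided
 * by taking an extra page reference.
 */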
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret = 0;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}
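
/* Undo tls_split_open_record(): append 'from's plaintext back onto 'to',
 * coalescing the boundary entries when they are contiguous in the same
 * page, and take over 'from's larger encrypted buffer.
 */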
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}
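
/* Encrypt the current open record and hand it to the transmit path. For
 * TLS 1.3 the content-type byte is chained after the plaintext and the
 * on-the-wire header carries the application-data type, as the protocol
 * requires; for TLS 1.2 the real type goes into the header via
 * tls_fill_prepend().
 */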
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, uninitialized_var(orig_end);
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message.  No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, prot->rec_seq_size,
		     record_type, prot->version);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type, prot->version);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}
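
/* Run the open record through the attached BPF TX policy, if any.
 * Depending on the verdict the record is encrypted and sent (__SK_PASS),
 * its plaintext redirected to another socket (__SK_REDIRECT), or dropped
 * (__SK_DROP). Without a psock, or with MSG_SENDPAGE_NOPOLICY, the
 * record is simply pushed out.
 */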
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}
static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}
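
/* sendmsg() for a TLS_SW transmit socket. Plaintext is staged into the
 * open record: zero-copy from user pages when the record will be closed
 * now and the cipher is synchronous, otherwise copied. A record is
 * pushed whenever it is full or the message ends without MSG_MORE.
 */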
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;
	int pending;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		full_record = false;
		rec = ctx->open_rec;
		if (!rec)
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;

		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		spin_lock_bh(&ctx->encrypt_compl_lock);
		ctx->async_notify = true;

		pending = atomic_read(&ctx->encrypt_pending);
		spin_unlock_bh(&ctx->encrypt_compl_lock);
		if (pending)
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		/* There can be no concurrent accesses, since we have no
		 * pending encrypt operations
		 */
		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return copied > 0 ? copied : ret;
}
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
			      int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct sk_msg *msg_pl;
	struct tls_rec *rec;
	int num_async = 0;
	ssize_t copied = 0;
	bool full_record;
	int record_room;
	int ret = 0;
	bool eor;

	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		rec = ctx->open_rec;
		if (!rec)
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		msg_pl = &rec->msg_plaintext;

		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + copy + prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}

		sk_msg_page_add(msg_pl, page, copy, offset);
		sk_mem_charge(sk, copy);

		offset += copy;
		size -= copy;
		copied += copy;

		tls_ctx->pending_open_record_frags = true;
		if (full_record || eor || sk_msg_full(msg_pl)) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied, flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto sendpage_end;
				}
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}

		goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	ret = sk_stream_error(sk, flags, ret);
	return copied > 0 ? copied : ret;
}
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags)
{
	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
		      MSG_NO_SHARED_FRAGS))
		return -EOPNOTSUPP;

	return tls_sw_do_sendpage(sk, page, offset, size, flags);
}

int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}
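
/* Wait until the strparser has a complete record (ctx->recv_pkt) or the
 * psock ingress queue becomes non-empty, honouring MSG_DONTWAIT, socket
 * errors, shutdown and signals.
 */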
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
				     int flags, long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo,
			      ctx->recv_pkt != skb ||
			      !sk_psock_queue_empty(psock),
			      &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}
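
/* Pin the user pages backing 'from' and describe them in the 'to'
 * scatterlist for zero-copy decryption, using at most to_max_pages
 * entries. On failure the iterator is rewound so the caller can fall
 * back to copying.
 */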
static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
			       int length, int *pages_used,
			       unsigned int *size_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = *size_used;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}
/* This function decrypts the input skb into out_iov, into out_sg, or in
 * place in the skb's own buffers. The input parameter 'zc' indicates
 * whether zero-copy mode should be tried; with zero-copy, either out_iov
 * or out_sg must be non-NULL. If both out_iov and out_sg are NULL, the
 * decryption happens in place in the skb buffers, i.e. zero-copy gets
 * disabled and '*zc' is updated accordingly.
 */
static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *out_iov,
			    struct scatterlist *out_sg,
			    int *chunk, bool *zc, bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = strp_msg(skb);
	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
	struct aead_request *aead_req;
	struct sk_buff *unused;
	u8 *aad, *iv, *mem = NULL;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size +
			     prot->tail_size;
	int iv_offset = 0;

	if (*zc && (out_iov || out_sg)) {
		if (out_iov)
			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
		else
			n_sgout = sg_nents(out_sg);
		n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
				 rxm->full_len - prot->prepend_size);
	} else {
		n_sgout = 0;
		*zc = false;
		n_sgin = skb_cow_data(skb, 0, &unused);
	}

	if (n_sgin < 1)
		return -EBADMSG;

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	nsg = n_sgin + n_sgout;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
	mem_size = mem_size + prot->aad_size;
	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

	/* Allocate a single block of memory which contains
	 * aead_req || sgin[] || sgout[] || aad || iv.
	 * This order achieves correct alignment for aead_req, sgin, sgout.
	 */
	mem = kmalloc(mem_size, sk->sk_allocation);
	if (!mem)
		return -ENOMEM;

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	sgin = (struct scatterlist *)(mem + aead_size);
	sgout = sgin + n_sgin;
	aad = (u8 *)(sgout + n_sgout);
	iv = aad + prot->aad_size;

	/* For CCM based ciphers, first byte of nonce+iv is always '2' */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		iv[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
	}

	/* Prepare IV */
	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + iv_offset + prot->salt_size,
			    prot->iv_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}
	if (prot->version == TLS_1_3_VERSION)
		memcpy(iv + iv_offset, tls_ctx->rx.iv,
		       crypto_aead_ivsize(ctx->aead_recv));
	else
		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);

	xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, prot->rec_seq_size,
		     ctx->control, prot->version);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}

	if (n_sgout) {
		if (out_iov) {
			sg_init_table(sgout, n_sgout);
			sg_set_buf(&sgout[0], aad, prot->aad_size);

			*chunk = 0;
			err = tls_setup_from_iter(sk, out_iov, data_len,
						  &pages, chunk, &sgout[1],
						  (n_sgout - 1));
			if (err < 0)
				goto fallback_to_reg_recv;
		} else if (out_sg) {
			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
		} else {
			goto fallback_to_reg_recv;
		}
	} else {
fallback_to_reg_recv:
		sgout = sgin;
		pages = 0;
		*chunk = data_len;
		*zc = false;
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
				data_len, aead_req, async);
	if (err == -EINPROGRESS)
		return err;

	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));

	kfree(mem);
	return err;
}
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct iov_iter *dest, int *chunk, bool *zc,
			      bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = strp_msg(skb);
	int pad, err = 0;

	if (!ctx->decrypted) {
		if (tls_ctx->rx_conf == TLS_HW) {
			err = tls_device_decrypted(sk, skb);
			if (err < 0)
				return err;
		}

		/* Still not decrypted after tls_device */
		if (!ctx->decrypted) {
			err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
					       async);
			if (err < 0) {
				if (err == -EINPROGRESS)
					tls_advance_record_sn(sk, prot,
							      &tls_ctx->rx);

				return err;
			}
		} else {
			*zc = false;
		}

		pad = padding_length(ctx, prot, skb);
		if (pad < 0)
			return pad;

		rxm->full_len -= pad;
		rxm->offset += prot->prepend_size;
		rxm->full_len -= prot->overhead_size;
		tls_advance_record_sn(sk, prot, &tls_ctx->rx);
		ctx->decrypted = true;
		ctx->saved_data_ready(sk);
	} else {
		*zc = false;
	}

	return err;
}
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	bool zc = true;
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (skb) {
		struct strp_msg *rxm = strp_msg(skb);

		if (len < rxm->full_len) {
			rxm->offset += len;
			rxm->full_len -= len;
			return false;
		}
		consume_skb(skb);
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	__strp_unpause(&ctx->strp);

	return true;
}
/* This function traverses the rx_list in the TLS receive context and
 * copies already decrypted records into the buffer provided by the
 * caller when zero copy is not in effect. Records are also removed from
 * rx_list if this is not a peek case and the record has been consumed
 * completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   bool *cmsg,
			   size_t skip,
			   size_t len,
			   bool zc,
			   bool is_peek)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	u8 ctrl = *control;
	u8 msgc = *cmsg;
	struct tls_msg *tlm;
	ssize_t copied = 0;

	/* Set the record type in 'control' if caller didn't pass it */
	if (!ctrl && skb) {
		tlm = tls_msg(skb);
		ctrl = tlm->control;
	}

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		/* Cannot process a record of different type */
		if (ctrl != tlm->control)
			return 0;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		/* Cannot process a record of different type */
		if (ctrl != tlm->control)
			return 0;

		/* Set record type if not already done. For a non-data record,
		 * do not proceed if record type could not be copied.
		 */
		if (!msgc) {
			int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					    sizeof(ctrl), &ctrl);
			msgc = true;
			if (ctrl != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC)
					return -EIO;

				*cmsg = msgc;
			}
		}

		if (!zc || (rxm->full_len - skip) > len) {
			int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
							msg, chunk);
			if (err < 0)
				return err;
		}

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from record if it is non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}

	*control = ctrl;
	return copied;
}
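
/* recvmsg() for a TLS_SW receive socket. Previously decrypted records
 * parked on rx_list are drained first; then records are pulled from the
 * strparser and decrypted, zero-copy directly into the user iov when the
 * whole record fits and the cipher/record type allow it. Async decrypts
 * are waited for at recv_end before returning.
 */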
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct sk_psock *psock;
	unsigned char control = 0;
	ssize_t decrypted = 0;
	struct strp_msg *rxm;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	int num_async = 0;
	int pending;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	psock = sk_psock_get(sk);
	lock_sock(sk);

	/* Process pending decrypted records. It must be non-zero-copy */
	err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
			      is_peek);
	if (err < 0) {
		tls_err_abort(sk, err);
		goto end;
	} else {
		copied = err;
	}

	if (len <= copied)
		goto recv_end;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	while (len && (decrypted + copied < target || ctx->recv_pkt)) {
		bool retain_skb = false;
		bool zc = false;
		int to_decrypt;
		int chunk = 0;
		bool async_capable;
		bool async = false;

		skb = tls_wait_data(sk, psock, flags, timeo, &err);
		if (!skb) {
			if (psock) {
				int ret = __tcp_bpf_recvmsg(sk, psock,
							    msg, len, flags);

				if (ret > 0) {
					decrypted += ret;
					len -= ret;
					continue;
				}
			}
			goto recv_end;
		} else {
			tlm = tls_msg(skb);
			if (prot->version == TLS_1_3_VERSION)
				tlm->control = 0;
			else
				tlm->control = ctx->control;
		}

		rxm = strp_msg(skb);

		to_decrypt = rxm->full_len - prot->overhead_size;

		if (to_decrypt <= len && !is_kvec && !is_peek &&
		    ctx->control == TLS_RECORD_TYPE_DATA &&
		    prot->version != TLS_1_3_VERSION)
			zc = true;

		/* Do not use async mode if record is non-data */
		if (ctx->control == TLS_RECORD_TYPE_DATA)
			async_capable = ctx->async_capable;
		else
			async_capable = false;

		err = decrypt_skb_update(sk, skb, &msg->msg_iter,
					 &chunk, &zc, async_capable);
		if (err < 0 && err != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			goto recv_end;
		}

		if (err == -EINPROGRESS) {
			async = true;
			num_async++;
		} else if (prot->version == TLS_1_3_VERSION) {
			tlm->control = ctx->control;
		}

		/* If the type of records being processed is not known yet,
		 * set it to record type just dequeued. If it is already known,
		 * but does not match the record type just dequeued, go to end.
		 * We always get record type here since for tls1.2, record type
		 * is known just after record is dequeued from stream parser.
		 * For tls1.3, we disable async.
		 */
		if (!control)
			control = tlm->control;
		else if (control != tlm->control)
			goto recv_end;

		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(control), &control);
			cmsg = true;
			if (control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		}

		if (async)
			goto pick_next_record;

		if (!zc) {
			if (rxm->full_len > len) {
				retain_skb = true;
				chunk = len;
			} else {
				chunk = rxm->full_len;
			}

			err = skb_copy_datagram_msg(skb, rxm->offset,
						    msg, chunk);
			if (err < 0)
				goto recv_end;

			if (!is_peek) {
				rxm->offset = rxm->offset + chunk;
				rxm->full_len = rxm->full_len - chunk;
			}
		}

pick_next_record:
		if (chunk > len)
			chunk = len;

		decrypted += chunk;
		len -= chunk;

		/* For async or peek case, queue the current skb */
		if (async || is_peek || retain_skb) {
			skb_queue_tail(&ctx->rx_list, skb);
			skb = NULL;
		}

		if (tls_sw_advance_skb(sk, skb, chunk)) {
			/* Return full control message to
			 * userspace before trying to parse
			 * another message type
			 */
			msg->msg_flags |= MSG_EOR;
			if (ctx->control != TLS_RECORD_TYPE_DATA)
				goto recv_end;
		} else {
			break;
		}
	}

recv_end:
	if (num_async) {
		/* Wait for all previously submitted records to be decrypted */
		spin_lock_bh(&ctx->decrypt_compl_lock);
		ctx->async_notify = true;
		pending = atomic_read(&ctx->decrypt_pending);
		spin_unlock_bh(&ctx->decrypt_compl_lock);
		if (pending) {
			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
			if (err) {
				/* one of async decrypt failed */
				tls_err_abort(sk, err);
				copied = 0;
				decrypted = 0;
				goto end;
			}
		} else {
			reinit_completion(&ctx->async_wait.completion);
		}

		/* There can be no concurrent accesses, since we have no
		 * pending decrypt operations
		 */
		WRITE_ONCE(ctx->async_notify, false);

		/* Drain records from the rx_list & copy if required */
		if (is_peek || is_kvec)
			err = process_rx_list(ctx, msg, &control, &cmsg, copied,
					      decrypted, false, is_peek);
		else
			err = process_rx_list(ctx, msg, &control, &cmsg, 0,
					      decrypted, true, is_peek);
		if (err < 0) {
			tls_err_abort(sk, err);
			copied = 0;
			goto end;
		}
	}

	copied += decrypted;

end:
	release_sock(sk);
	if (psock)
		sk_psock_put(sk, psock);
	return copied ? : err;
}
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc = false;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, NULL, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);

		/* splice does not support reading control messages */
		if (ctx->control != TLS_RECORD_TYPE_DATA) {
			err = -EINVAL;
			goto splice_read_end;
		}

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}
bool tls_sw_stream_read(const struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();

	return !ingress_empty || ctx->recv_pkt ||
		!skb_queue_empty(&ctx->rx_list);
}
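
/* strparser callback: determine the length of the next complete record.
 * The 5-byte TLS record header is laid out as
 *
 *   header[0]     ContentType
 *   header[1..2]  legacy record version (0x03 0x03 for TLS 1.2 and 1.3)
 *   header[3..4]  ciphertext length, big endian
 *
 * so the full record occupies TLS_HEADER_SIZE + data_len bytes. Returns
 * 0 to wait for more data, or a negative error to abort the stream.
 */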
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + prot->prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = prot->tag_size;
	if (prot->version != TLS_1_3_VERSION)
		cipher_overhead += prot->iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}

	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + rxm->offset);
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}
static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	ctx->saved_data_ready(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;

	strp_data_ready(&ctx->strp);

	psock = sk_psock_get(sk);
	if (psock) {
		if (!list_empty(&psock->ingress_msg))
			ctx->saved_data_ready(sk);
		sk_psock_put(sk, psock);
	}
}
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
	cancel_delayed_work_sync(&ctx->tx_work.work);
}
void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;

	/* Wait for any pending async encryptions to complete */
	smp_store_mb(ctx->async_notify, true);
	if (atomic_read(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	tls_tx_records(sk, -1);

	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		tls_free_partial_record(sk, tls_ctx);
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	kfree(ctx);
}

void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}

void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_done(&ctx->strp);
}

void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(ctx);
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(tls_ctx);
}
/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;

	if (unlikely(!tls_ctx))
		return;

	ctx = tls_sw_ctx_tx(tls_ctx);
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;
	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	tls_tx_records(sk, -1);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* Schedule the transmission if tx list is ready */
	if (is_tx_ready(tx_ctx) &&
	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}

void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	write_lock_bh(&sk->sk_callback_lock);
	rx_ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = tls_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);

	strp_check_rcv(&rx_ctx->strp);
}
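
/* Configure software TLS for one direction (tx selects the transmit
 * path). Allocates the per-direction context, copies IV/salt and the
 * record sequence number out of the setsockopt() crypto_info, sets up
 * the AEAD transform for the chosen cipher, and for receive arms the
 * strparser callbacks.
 */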
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
	struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
	struct crypto_tfm *tfm;
	char *iv, *rec_seq, *key, *salt, *cipher_name;
	size_t keysize;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
		sw_ctx_tx->tx_work.sk = sk;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		skb_queue_head_init(&sw_ctx_rx->rx_list);
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		key = gcm_128_info->key;
		salt = gcm_128_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
		gcm_256_info =
			(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		key = gcm_256_info->key;
		salt = gcm_256_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_CCM_128: {
		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
		ccm_128_info =
			(struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
		key = ccm_128_info->key;
		salt = ccm_128_info->salt;
		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
		cipher_name = "ccm(aes)";
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size +
			      prot->tag_size + prot->tail_size;
	prot->iv_size = iv_size;
	prot->salt_size = salt_size;
	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	/* Note: 128 & 256 bit salt are the same size */
	prot->rec_seq_size = rec_seq_size;
	memcpy(cctx->iv, salt, salt_size);
	memcpy(cctx->iv + salt_size, iv, iv_size);
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, key, keysize);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		if (crypto_info->version == TLS_1_3_VERSION)
			sw_ctx_rx->async_capable = false;
		else
			sw_ctx_rx->async_capable =
				tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;

		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}