// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>

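/* The sg ring can wrap: end > start means the used region is contiguous,
 * end < start means it wraps around. Coalescing into the tail element is
 * only allowed when that element sits at or past the caller's
 * elem_first_coalesce boundary.
 */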
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

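/* Grow msg to @len bytes using the socket's page frag allocator. New
 * bytes coalesce into the previous sg entry when they continue the same
 * page frag; otherwise a fresh ring slot is consumed, returning -ENOSPC
 * once the ring is full.
 */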
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use))
			return -ENOMEM;

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

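/* Share @len bytes starting at @off from @src into @dst without copying
 * payload: destination entries reference the same pages, merging into the
 * tail element of @dst when the regions are virtually contiguous, and the
 * shared bytes are charged to socket memory accounting. Returns -ENOSPC
 * when @src is exhausted early or @dst has no free ring slots.
 */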
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

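/* Uncharge @bytes from the front of the ring, zeroing every element that
 * is fully consumed and advancing sg.start past it.
 */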
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

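/* Walk the whole used ring and uncharge up to @bytes of socket memory
 * accounting; the elements themselves are left untouched.
 */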
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

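/* Free a single sg element: optionally uncharge it, drop its page
 * reference unless the pages belong to an attached skb (which is consumed
 * separately), and clear the entry. Returns the bytes released.
 */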
static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	if (charge)
		sk_mem_uncharge(sk, len);
	if (!msg->skb)
		put_page(sg_page(sge));
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	if (msg->skb)
		consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

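/* Trim msg down to @len bytes from the end: whole trailing elements are
 * freed first, then the remainder is shaved off the last partial element.
 * curr and copybreak are pulled back when they end up past the new end.
 */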
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

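/* Pin pages from @from and link them straight into the sg ring, so no
 * payload is copied. On error the iov_iter is reverted, while any sg
 * entries already added must be cleaned up by the caller with a trim.
 */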
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in that case clear it and prefer
		 * the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates; msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

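/* Copy @bytes from @from into buffers previously allocated in msg,
 * resuming at sg.curr/copybreak. Uses the nocache copy variant when the
 * route advertises NETIF_F_NOCACHE_COPY.
 */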
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

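/* Wrap an ingress skb in a freshly allocated sk_msg so it can be queued
 * on the psock: the skb's data is mapped into the sg ring via
 * skb_to_sgvec() while the skb itself is kept (and consumed when the msg
 * is freed), then the receiver is woken via sk_psock_data_ready().
 */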
static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sock *sk = psock->sk;
	int copied = 0, num_sge;
	struct sk_msg *msg;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	if (unlikely(!msg))
		return -EAGAIN;
	if (!sk_rmem_schedule(sk, skb, skb->len)) {
		kfree(msg);
		return -EAGAIN;
	}

	sk_msg_init(msg);
	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
	if (unlikely(num_sge < 0)) {
		kfree(msg);
		return num_sge;
	}

	sk_mem_charge(sk, skb->len);
	copied = skb->len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge == MAX_MSG_FRAGS ? 0 : num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (ingress)
		return sk_psock_skb_ingress(psock, skb);
	else
		return skb_send_sock_locked(psock->sk, skb, off, len);
}

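/* Work-queue handler that drains psock->ingress_skb. On -EAGAIN the
 * current skb and its progress are parked in work_state and retried on
 * the next run; any other error is reported on the socket and disables
 * transmit.
 */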
static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb;
	bool ingress;
	u32 len, off;
	int ret;

	/* Lock sock to avoid losing sk_socket during loop. */
	lock_sock(psock->sk);
	if (state->skb) {
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
start:
		ingress = tcp_skb_bpf_ingress(skb);
		do {
			ret = -EIO;
			if (likely(psock->sk->sk_socket))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					state->skb = skb;
					state->len = len;
					state->off = off;
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				kfree_skb(skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	release_sock(psock->sk);
}

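/* Allocate and attach a psock to @sk via sk_user_data, holding a socket
 * reference that is released in the deferred destroy path.
 */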
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock = kzalloc_node(sizeof(*psock),
					      GFP_ATOMIC | __GFP_NOWARN,
					      node);
	if (!psock)
		return NULL;

	psock->sk = sk;
	psock->eval = __SK_NONE;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	INIT_LIST_HEAD(&psock->ingress_msg);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	rcu_assign_sk_user_data(sk, psock);
	sock_hold(sk);

	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void sk_psock_zap_ingress(struct sk_psock *psock)
{
	__skb_queue_purge(&psock->ingress_skb);
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

static void sk_psock_destroy_deferred(struct work_struct *gc)
{
	struct sk_psock *psock = container_of(gc, struct sk_psock, gc);

	/* No sk_callback_lock since already detached. */

	/* Parser has been stopped */
	if (psock->progs.skb_parser)
		strp_done(&psock->parser.strp);

	cancel_work_sync(&psock->work);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);
	sk_psock_zap_ingress(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_destroy(struct rcu_head *rcu)
{
	struct sk_psock *psock = container_of(rcu, struct sk_psock, rcu);

	INIT_WORK(&psock->gc, sk_psock_destroy_deferred);
	schedule_work(&psock->gc);
}
EXPORT_SYMBOL_GPL(sk_psock_destroy);

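/* Detach the psock from @sk: restore the socket's original proto
 * callbacks, stop the strparser if one is attached, and defer the free
 * through RCU so concurrent sk_user_data readers remain safe.
 */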
void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	sk_psock_cork_free(psock);
	sk_psock_zap_ingress(psock);

	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.skb_parser)
		sk_psock_stop_strp(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);

	call_rcu(&psock->rcu, sk_psock_destroy);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

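/* Run the msg_parser program on @msg and map its return value to an
 * internal verdict. For __SK_REDIRECT the redirect socket chosen by the
 * program is pinned in psock->sk_redir with a reference held.
 */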
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	preempt_disable();
	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = BPF_PROG_RUN(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
			    struct sk_buff *skb)
{
	int ret;

	skb->sk = psock->sk;
	bpf_compute_data_end_sk_skb(skb);
	preempt_disable();
	ret = BPF_PROG_RUN(prog, skb);
	preempt_enable();
	/* strparser clones the skb before handing it to an upper layer,
	 * meaning skb_orphan has been called. We NULL sk on the way out
	 * to ensure we don't trigger a BUG_ON() in skb/sk operations
	 * later and because we are not charging the memory of this skb
	 * to any socket yet.
	 */
	skb->sk = NULL;
	return ret;
}

static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
{
	struct sk_psock_parser *parser;

	parser = container_of(strp, struct sk_psock_parser, strp);
	return container_of(parser, struct sk_psock, parser);
}

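/* Act on a verdict for an skb: __SK_PASS requeues it on our own ingress
 * queue (if the receive buffer allows), __SK_REDIRECT hands it to the
 * target psock's ingress queue, and everything else frees the skb.
 */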
static void sk_psock_verdict_apply(struct sk_psock *psock,
				   struct sk_buff *skb, int verdict)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;
	bool ingress;

	switch (verdict) {
	case __SK_PASS:
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			goto out_free;
		}
		if (atomic_read(&sk_other->sk_rmem_alloc) <=
		    sk_other->sk_rcvbuf) {
			struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);

			tcp->bpf.flags |= BPF_F_INGRESS;
			skb_queue_tail(&psock->ingress_skb, skb);
			schedule_work(&psock->work);
			break;
		}
		goto out_free;
	case __SK_REDIRECT:
		sk_other = tcp_skb_bpf_redirect_fetch(skb);
		if (unlikely(!sk_other))
			goto out_free;
		psock_other = sk_psock(sk_other);
		if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED))
			goto out_free;
		ingress = tcp_skb_bpf_ingress(skb);
		if ((!ingress && sock_writeable(sk_other)) ||
		    (ingress &&
		     atomic_read(&sk_other->sk_rmem_alloc) <=
		     sk_other->sk_rcvbuf)) {
			if (!ingress)
				skb_set_owner_w(skb, sk_other);
			skb_queue_tail(&psock_other->ingress_skb, skb);
			schedule_work(&psock_other->work);
			break;
		}
		/* fall-through */
	case __SK_DROP:
		/* fall-through */
	default:
out_free:
		kfree_skb(skb);
	}
}

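/* strparser callbacks: the parse hook asks the skb_parser program how
 * many bytes make up the next message (defaulting to the whole skb), and
 * the read hook runs the skb_verdict program on each parsed message.
 */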
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = sk_psock_from_strp(strp);
	struct bpf_prog *prog;
	int ret = __SK_DROP;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb_orphan(skb);
		tcp_skb_bpf_redirect_clear(skb);
		ret = sk_psock_bpf_run(psock, prog, skb);
		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
	}
	rcu_read_unlock();
	sk_psock_verdict_apply(psock, skb, ret);
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = sk_psock_from_strp(strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.skb_parser);
	if (likely(prog))
		ret = sk_psock_bpf_run(psock, prog, skb);
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		write_lock_bh(&sk->sk_callback_lock);
		strp_data_ready(&psock->parser.strp);
		write_unlock_bh(&sk->sk_callback_lock);
	}
	rcu_read_unlock();
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	/* The psock may already have been dropped, so only call the saved
	 * callback when we actually found one instead of dereferencing a
	 * NULL psock.
	 */
	if (write_space)
		write_space(sk);
}

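/* strparser setup/teardown. start swaps in our data_ready and
 * write_space callbacks, saving the original data_ready so that stop can
 * restore it when the parser is disabled.
 */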
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	psock->parser.enabled = false;
	return strp_init(&psock->parser.strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (parser->enabled)
		return;

	parser->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
	parser->enabled = true;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (!parser->enabled)
		return;

	sk->sk_data_ready = parser->saved_data_ready;
	parser->saved_data_ready = NULL;
	strp_stop(&parser->strp);
	parser->enabled = false;
}